prometheus-client 1.0.0 → 4.1.0
- checksums.yaml +4 -4
- data/LICENSE +201 -0
- data/README.md +147 -63
- data/lib/prometheus/client/data_stores/README.md +1 -1
- data/lib/prometheus/client/data_stores/direct_file_store.rb +63 -25
- data/lib/prometheus/client/histogram.rb +41 -11
- data/lib/prometheus/client/label_set_validator.rb +10 -4
- data/lib/prometheus/client/metric.rb +30 -10
- data/lib/prometheus/client/push.rb +126 -12
- data/lib/prometheus/client/registry.rb +4 -4
- data/lib/prometheus/client/summary.rb +17 -3
- data/lib/prometheus/client/version.rb +1 -1
- data/lib/prometheus/middleware/collector.rb +12 -4
- data/lib/prometheus/middleware/exporter.rb +8 -3
- metadata +10 -10
data/lib/prometheus/client/data_stores/direct_file_store.rb:

@@ -29,7 +29,7 @@ module Prometheus
 
       class DirectFileStore
         class InvalidStoreSettingsError < StandardError; end
-        AGGREGATION_MODES = [MAX = :max, MIN = :min, SUM = :sum, ALL = :all]
+        AGGREGATION_MODES = [MAX = :max, MIN = :min, SUM = :sum, ALL = :all, MOST_RECENT = :most_recent]
         DEFAULT_METRIC_SETTINGS = { aggregation: SUM }
         DEFAULT_GAUGE_SETTINGS = { aggregation: ALL }
 
@@ -45,7 +45,7 @@ module Prometheus
           end
 
           settings = default_settings.merge(metric_settings)
-          validate_metric_settings(settings)
+          validate_metric_settings(metric_type, settings)
 
           MetricStore.new(metric_name: metric_name,
                           store_settings: @store_settings,
@@ -54,7 +54,7 @@ module Prometheus
 
         private
 
-        def validate_metric_settings(metric_settings)
+        def validate_metric_settings(metric_type, metric_settings)
           unless metric_settings.has_key?(:aggregation) &&
               AGGREGATION_MODES.include?(metric_settings[:aggregation])
             raise InvalidStoreSettingsError,
@@ -65,6 +65,11 @@ module Prometheus
             raise InvalidStoreSettingsError,
                   "Only :aggregation setting can be specified"
           end
+
+          if metric_settings[:aggregation] == MOST_RECENT && metric_type != :gauge
+            raise InvalidStoreSettingsError,
+                  "Only :gauge metrics support :most_recent aggregation"
+          end
         end
 
         class MetricStore
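A sketch of how the new aggregation mode is meant to be used, based on the settings above; the storage directory and metric name are illustrative, not taken from the diff:

```ruby
require 'prometheus/client'
require 'prometheus/client/data_stores/direct_file_store'
require 'prometheus/client/gauge'

# Per-process files for pre-fork servers (Puma, Unicorn, etc.)
Prometheus::Client.config.data_store =
  Prometheus::Client::DataStores::DirectFileStore.new(dir: '/tmp/prometheus')

# Gauges may opt into :most_recent and export only the value written last
# across all processes, instead of one series per PID (:all) or a sum.
last_deploy = Prometheus::Client::Gauge.new(
  :last_deploy_timestamp_seconds,
  docstring: 'Unix timestamp of the last deploy',
  store_settings: { aggregation: :most_recent }
)
last_deploy.set(Time.now.to_i)

# Declaring any non-gauge metric with :most_recent raises
# InvalidStoreSettingsError, as does calling increment/decrement on it.
```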
@@ -74,6 +79,7 @@ module Prometheus
             @metric_name = metric_name
             @store_settings = store_settings
             @values_aggregation_mode = metric_settings[:aggregation]
+            @store_opened_by_pid = nil
 
             @lock = Monitor.new
           end
@@ -100,10 +106,15 @@ module Prometheus
           end
 
           def increment(labels:, by: 1)
+            if @values_aggregation_mode == DirectFileStore::MOST_RECENT
+              raise InvalidStoreSettingsError,
+                "The :most_recent aggregation does not support the use of increment"\
+                "/decrement"
+            end
+
             key = store_key(labels)
             in_process_sync do
-              value = internal_store.read_value(key)
-              internal_store.write_value(key, value + by.to_f)
+              internal_store.increment_value(key, by.to_f)
             end
           end
 
@@ -121,7 +132,7 @@ module Prometheus
           stores_for_metric.each do |file_path|
             begin
               store = FileMappedDict.new(file_path, true)
-              store.all_values.each do |(labelset_qs, v)|
+              store.all_values.each do |(labelset_qs, v, ts)|
                 # Labels come as a query string, and CGI::parse returns arrays for each key
                 # "foo=bar&x=y" => { "foo" => ["bar"], "x" => ["y"] }
                 # Turn the keys back into symbols, and remove the arrays
@@ -129,7 +140,7 @@ module Prometheus
                   [k.to_sym, vs.first]
                 end.to_h
 
-                stores_data[label_set] << v
+                stores_data[label_set] << [v, ts]
               end
             ensure
               store.close if store
@@ -181,30 +192,41 @@ module Prometheus
           end
 
           def aggregate_values(values)
-            if @values_aggregation_mode == SUM
-              values.inject { |sum, element| sum + element }
-            elsif @values_aggregation_mode == MAX
-              values.max
-            elsif @values_aggregation_mode == MIN
-              values.min
-            elsif @values_aggregation_mode == ALL
-              values.first
+            # Each entry in the `values` array is a tuple of `value` and `timestamp`,
+            # so for all aggregations except `MOST_RECENT`, we need to only take the
+            # first value in each entry and ignore the second.
+            if @values_aggregation_mode == MOST_RECENT
+              latest_tuple = values.max { |a,b| a[1] <=> b[1] }
+              latest_tuple.first # return the value without the timestamp
             else
-              raise InvalidStoreSettingsError,
-                    "Invalid Aggregation Mode: #{ @values_aggregation_mode }"
+              values = values.map(&:first) # Discard timestamps
+
+              if @values_aggregation_mode == SUM
+                values.inject { |sum, element| sum + element }
+              elsif @values_aggregation_mode == MAX
+                values.max
+              elsif @values_aggregation_mode == MIN
+                values.min
+              elsif @values_aggregation_mode == ALL
+                values.first
+              else
+                raise InvalidStoreSettingsError,
+                      "Invalid Aggregation Mode: #{ @values_aggregation_mode }"
+              end
             end
           end
         end
 
        private_constant :MetricStore
 
-        # A dict of doubles, backed by an file we access directly
+        # A dict of doubles, backed by an file we access directly as a byte array.
        #
        # The file starts with a 4 byte int, indicating how much of it is used.
        # Then 4 bytes of padding.
        # There's then a number of entries, consisting of a 4 byte int which is the
        # size of the next field, a utf-8 encoded string key, padding to an 8 byte
-        # alignment, and then a 8 byte float which is the value
+        # alignment, and then a 8 byte float which is the value, and then a 8 byte
+        # float which is the unix timestamp when the value was set.
        class FileMappedDict
          INITIAL_FILE_SIZE = 1024*1024
 
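To make the tuple handling concrete, here is a small standalone illustration of the aggregation step above (plain Ruby, not the gem's internal API):

```ruby
# Three per-process readings for the same label set, as [value, timestamp] pairs.
# The timestamp comes from a monotonic clock, so only its ordering matters.
values = [[12.0, 100.2], [7.0, 103.9], [9.5, 101.1]]

values.map(&:first).inject(:+)         # :sum         => 28.5
values.map(&:first).max                # :max         => 12.0
values.max_by { |(_v, ts)| ts }.first  # :most_recent => 7.0 (written last)
```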
@@ -235,8 +257,8 @@ module Prometheus
             with_file_lock do
               @positions.map do |key, pos|
                 @f.seek(pos)
-                value = @f.read(8).unpack('d')[0]
-                [key, value]
+                value, timestamp = @f.read(16).unpack('dd')
+                [key, value, timestamp]
               end
             end
           end
@@ -256,9 +278,25 @@ module Prometheus
               init_value(key)
             end
 
+            now = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+            pos = @positions[key]
+            @f.seek(pos)
+            @f.write([value, now].pack('dd'))
+            @f.flush
+          end
+
+          def increment_value(key, by)
+            if !@positions.has_key?(key)
+              init_value(key)
+            end
+
             pos = @positions[key]
             @f.seek(pos)
-            @f.write([value].pack('d'))
+            value = @f.read(8).unpack('d')[0]
+
+            now = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+            @f.seek(-8, :CUR)
+            @f.write([value + by, now].pack('dd'))
             @f.flush
           end
 
@@ -299,7 +337,7 @@ module Prometheus
           def init_value(key)
             # Pad to be 8-byte aligned.
             padded = key + (' ' * (8 - (key.length + 4) % 8))
-            value = [padded.length, padded, 0.0].pack("lA#{padded.length}d")
+            value = [padded.length, padded, 0.0, 0.0].pack("lA#{padded.length}dd")
             while @used + value.length > @capacity
               @capacity *= 2
               resize_file(@capacity)
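The entry layout described in the format comment can be reproduced with Array#pack; this is only an illustration of the on-disk format, not code from the gem:

```ruby
key = "foo=bar&x=y"                                # label set as a query string
padded = key + (' ' * (8 - (key.length + 4) % 8))  # align the entry to 8 bytes

# 4-byte length, space-padded key, 8-byte value, 8-byte timestamp
entry = [padded.length, padded, 12.5, 1234.5].pack("lA#{padded.length}dd")

length = entry[0, 4].unpack1('l')                       # => 12
value, timestamp = entry[4 + length, 16].unpack('dd')   # => [12.5, 1234.5]
```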
@@ -310,7 +348,7 @@ module Prometheus
             @f.seek(0)
             @f.write([@used].pack('l'))
             @f.flush
-            @positions[key] = @used - 8
+            @positions[key] = @used - 16
           end
 
           # Read position of all keys. No locking is performed.
@@ -320,7 +358,7 @@ module Prometheus
               padded_len = @f.read(4).unpack('l')[0]
               key = @f.read(padded_len).unpack("A#{padded_len}")[0].strip
               @positions[key] = @f.pos
-              @f.seek(8, :CUR)
+              @f.seek(16, :CUR)
             end
           end
         end
data/lib/prometheus/client/histogram.rb:

@@ -6,7 +6,7 @@ module Prometheus
   module Client
     # A histogram samples observations (usually things like request durations
     # or response sizes) and counts them in configurable buckets. It also
-    # provides a sum of all observed values.
+    # provides a total count and sum of all observed values.
     class Histogram < Metric
       # DEFAULT_BUCKETS are the default Histogram buckets. The default buckets
       # are tailored to broadly measure the response time (in seconds) of a
@@ -33,21 +33,41 @@ module Prometheus
                             store_settings: store_settings)
       end
 
+      def self.linear_buckets(start:, width:, count:)
+        count.times.map { |idx| start.to_f + idx * width }
+      end
+
+      def self.exponential_buckets(start:, factor: 2, count:)
+        count.times.map { |idx| start.to_f * factor ** idx }
+      end
+
       def with_labels(labels)
-        self.class.new(name,
-                       docstring: docstring,
-                       labels: @labels,
-                       preset_labels: preset_labels.merge(labels),
-                       buckets: @buckets,
-                       store_settings: @store_settings)
+        new_metric = self.class.new(name,
+                                    docstring: docstring,
+                                    labels: @labels,
+                                    preset_labels: preset_labels.merge(labels),
+                                    buckets: @buckets,
+                                    store_settings: @store_settings)
+
+        # The new metric needs to use the same store as the "main" declared one, otherwise
+        # any observations on that copy with the pre-set labels won't actually be exported.
+        new_metric.replace_internal_store(@store)
+
+        new_metric
       end
 
       def type
         :histogram
       end
 
+      # Records a given value. The recorded value is usually positive
+      # or zero. A negative value is accepted but prevents current
+      # versions of Prometheus from properly detecting counter resets
+      # in the sum of observations. See
+      # https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
+      # for details.
       def observe(value, labels: {})
-        bucket = buckets.find {|upper_limit| upper_limit > value }
+        bucket = buckets.find {|upper_limit| upper_limit >= value }
         bucket = "+Inf" if bucket.nil?
 
         base_label_set = label_set_for(labels)
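A usage sketch for the two new bucket helpers and the adjusted bucket boundary; the metric name is illustrative:

```ruby
require 'prometheus/client'
require 'prometheus/client/histogram'

# Five buckets 0.1 apart: 0.1, 0.2, 0.3, 0.4, 0.5 (modulo float rounding)
linear = Prometheus::Client::Histogram.linear_buckets(start: 0.1, width: 0.1, count: 5)

# Four buckets doubling from 1: 1.0, 2.0, 4.0, 8.0 (factor defaults to 2)
exponential = Prometheus::Client::Histogram.exponential_buckets(start: 1, count: 4)

request_seconds = Prometheus::Client::Histogram.new(
  :request_duration_seconds,
  docstring: 'Request duration in seconds',
  buckets: exponential
)

# With the boundary change above, a value equal to an upper bound is counted
# in that bucket: 4.0 lands in the "4.0" bucket rather than "8.0".
request_seconds.observe(4.0)
```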
@@ -81,19 +101,29 @@ module Prometheus
 
       # Returns all label sets with their values expressed as hashes with their buckets
       def values
-
+        values = @store.all_values
 
-        result =
+        result = values.each_with_object({}) do |(label_set, v), acc|
           actual_label_set = label_set.reject{|l| l == :le }
           acc[actual_label_set] ||= @buckets.map{|b| [b.to_s, 0.0]}.to_h
           acc[actual_label_set][label_set[:le].to_s] = v
         end
 
-        result.each do |(label_set, v)|
+        result.each do |(_label_set, v)|
           accumulate_buckets(v)
         end
       end
 
+      def init_label_set(labels)
+        base_label_set = label_set_for(labels)
+
+        @store.synchronize do
+          (buckets + ["+Inf", "sum"]).each do |bucket|
+            @store.set(labels: base_label_set.merge(le: bucket.to_s), val: 0)
+          end
+        end
+      end
+
       private
 
       # Modifies the passed in parameter
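`init_label_set` lets a labelled histogram export zeroed series before anything has been observed; a sketch assuming a histogram with one label:

```ruby
require 'prometheus/client/histogram'

job_seconds = Prometheus::Client::Histogram.new(
  :job_duration_seconds,
  docstring: 'Background job duration',
  labels: [:queue]
)

# Export zeroed buckets, "+Inf" and "sum" for this label set right away,
# instead of the series only appearing after the first observation.
job_seconds.init_label_set(queue: 'default')

job_seconds.observe(0.42, labels: { queue: 'default' })
```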
data/lib/prometheus/client/label_set_validator.rb:

@@ -5,8 +5,8 @@ module Prometheus
     # LabelSetValidator ensures that all used label sets comply with the
     # Prometheus specification.
     class LabelSetValidator
-
-
+      BASE_RESERVED_LABELS = [:pid].freeze
+      LABEL_NAME_REGEX = /\A[a-zA-Z_][a-zA-Z0-9_]*\Z/
 
       class LabelSetError < StandardError; end
       class InvalidLabelSetError < LabelSetError; end
@@ -59,9 +59,15 @@ module Prometheus
       end
 
       def validate_name(key)
-        return true unless key.to_s.start_with?('__')
+        if key.to_s.start_with?('__')
+          raise ReservedLabelError, "label #{key} must not start with __"
+        end
+
+        unless key.to_s =~ LABEL_NAME_REGEX
+          raise InvalidLabelError, "label name must match /#{LABEL_NAME_REGEX}/"
+        end
 
-        raise ReservedLabelError, "label #{key} must not start with __"
+        true
       end
 
       def validate_reserved_key(key)
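For reference, what the new label-name regex accepts and rejects (plain Ruby, same pattern):

```ruby
LABEL_NAME_REGEX = /\A[a-zA-Z_][a-zA-Z0-9_]*\Z/

"http_status" =~ LABEL_NAME_REGEX   # => 0   (valid)
"_private"    =~ LABEL_NAME_REGEX   # => 0   (valid, though "__"-prefixed names stay reserved)
"status-code" =~ LABEL_NAME_REGEX   # => nil (hyphens are rejected)
"2xx"         =~ LABEL_NAME_REGEX   # => nil (must not start with a digit)
```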
data/lib/prometheus/client/metric.rb:

@@ -7,7 +7,7 @@ module Prometheus
   module Client
     # Metric
     class Metric
-      attr_reader :name, :docstring, :preset_labels
+      attr_reader :name, :docstring, :labels, :preset_labels
 
       def initialize(name,
                      docstring:,
@@ -29,18 +29,28 @@ module Prometheus
         @docstring = docstring
         @preset_labels = stringify_values(preset_labels)
 
+        @all_labels_preset = false
+        if preset_labels.keys.length == labels.length
+          @validator.validate_labelset!(preset_labels)
+          @all_labels_preset = true
+        end
+
         @store = Prometheus::Client.config.data_store.for_metric(
           name,
           metric_type: type,
           metric_settings: store_settings
         )
 
-        if preset_labels.keys.length == labels.length
-          @validator.validate_labelset!(preset_labels)
-          @all_labels_preset = true
-        end
+        # WARNING: Our internal store can be replaced later by `with_labels`
+        # Everything we do after this point needs to still work if @store gets replaced
+        init_label_set({}) if labels.empty?
+      end
+
+      protected def replace_internal_store(new_store)
+        @store = new_store
       end
 
+
       # Returns the value for the given label set
       def get(labels: {})
         label_set = label_set_for(labels)
@@ -48,11 +58,21 @@ module Prometheus
       end
 
       def with_labels(labels)
-        self.class.new(name,
-                       docstring: docstring,
-                       labels: @labels,
-                       preset_labels: preset_labels.merge(labels),
-                       store_settings: @store_settings)
+        new_metric = self.class.new(name,
+                                    docstring: docstring,
+                                    labels: @labels,
+                                    preset_labels: preset_labels.merge(labels),
+                                    store_settings: @store_settings)
+
+        # The new metric needs to use the same store as the "main" declared one, otherwise
+        # any observations on that copy with the pre-set labels won't actually be exported.
+        new_metric.replace_internal_store(@store)
+
+        new_metric
+      end
+
+      def init_label_set(labels)
+        @store.set(labels: label_set_for(labels), val: 0)
       end
 
       # Returns all label sets with their values
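The practical effect of `replace_internal_store` is that a `with_labels` copy writes to the same data as the metric it was derived from; a sketch with an illustrative counter:

```ruby
require 'prometheus/client/counter'

http_requests = Prometheus::Client::Counter.new(
  :http_requests_total,
  docstring: 'HTTP requests',
  labels: [:service, :status]
)

# The copy shares the original metric's store, so increments made through it
# are visible when http_requests is exported.
api_requests = http_requests.with_labels(service: 'api')
api_requests.increment(labels: { status: '200' })

http_requests.get(labels: { service: 'api', status: '200' })  # => 1.0
```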
data/lib/prometheus/client/push.rb:

@@ -1,11 +1,15 @@
 # encoding: UTF-8
 
+require 'base64'
 require 'thread'
 require 'net/http'
 require 'uri'
+require 'erb'
+require 'set'
 
 require 'prometheus/client'
 require 'prometheus/client/formats/text'
+require 'prometheus/client/label_set_validator'
 
 module Prometheus
   # Client is a ruby implementation for a Prometheus compatible client.
@@ -13,23 +17,41 @@ module Prometheus
     # Push implements a simple way to transmit a given registry to a given
     # Pushgateway.
     class Push
+      class HttpError < StandardError; end
+      class HttpRedirectError < HttpError; end
+      class HttpClientError < HttpError; end
+      class HttpServerError < HttpError; end
+
       DEFAULT_GATEWAY = 'http://localhost:9091'.freeze
       PATH = '/metrics/job/%s'.freeze
-      INSTANCE_PATH = '/metrics/job/%s/instance/%s'.freeze
       SUPPORTED_SCHEMES = %w(http https).freeze
 
-      attr_reader :job, :instance, :gateway, :path
+      attr_reader :job, :gateway, :path
+
+      def initialize(job:, gateway: DEFAULT_GATEWAY, grouping_key: {}, **kwargs)
+        raise ArgumentError, "job cannot be nil" if job.nil?
+        raise ArgumentError, "job cannot be empty" if job.empty?
+        @validator = LabelSetValidator.new(expected_labels: grouping_key.keys)
+        @validator.validate_symbols!(grouping_key)
 
-      def initialize(job, instance = nil, gateway = nil)
         @mutex = Mutex.new
         @job = job
-        @instance = instance
         @gateway = gateway || DEFAULT_GATEWAY
-        @path = build_path(job, instance)
+        @grouping_key = grouping_key
+        @path = build_path(job, grouping_key)
+
         @uri = parse("#{@gateway}#{@path}")
+        validate_no_basic_auth!(@uri)
 
         @http = Net::HTTP.new(@uri.host, @uri.port)
         @http.use_ssl = (@uri.scheme == 'https')
+        @http.open_timeout = kwargs[:open_timeout] if kwargs[:open_timeout]
+        @http.read_timeout = kwargs[:read_timeout] if kwargs[:read_timeout]
+      end
+
+      def basic_auth(user, password)
+        @user = user
+        @password = password
       end
 
       def add(registry)
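Putting the new constructor together; the gateway URL, job name and grouping key are placeholders, and `open_timeout`/`read_timeout` are the keyword options read from `kwargs` above:

```ruby
require 'prometheus/client'
require 'prometheus/client/push'

push = Prometheus::Client::Push.new(
  job: 'nightly-batch',
  gateway: 'https://pushgateway.example.org:9091',
  grouping_key: { instance: 'worker-1' },
  open_timeout: 5,
  read_timeout: 30
)
push.basic_auth('user', 'password')   # replaces user:password@ in the gateway URL

push.add(Prometheus::Client.registry) # send the registry under this job/grouping key
```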
@@ -64,26 +86,118 @@ module Prometheus
         raise ArgumentError, "#{url} is not a valid URL: #{e}"
       end
 
-      def build_path(job, instance)
-        if instance
-          format(INSTANCE_PATH, job, instance)
-        else
-          format(PATH, job)
+      def build_path(job, grouping_key)
+        path = format(PATH, ERB::Util::url_encode(job))
+
+        grouping_key.each do |label, value|
+          if value.include?('/')
+            encoded_value = Base64.urlsafe_encode64(value)
+            path += "/#{label}@base64/#{encoded_value}"
+          # While it's valid for the urlsafe_encode64 function to return an
+          # empty string when the input string is empty, it doesn't work for
+          # our specific use case as we're putting the result into a URL path
+          # segment. A double slash (`//`) can be normalised away by HTTP
+          # libraries, proxies, and web servers.
+          #
+          # For empty strings, we use a single padding character (`=`) as the
+          # value.
+          #
+          # See the pushgateway docs for more details:
+          #
+          # https://github.com/prometheus/pushgateway/blob/6393a901f56d4dda62cd0f6ab1f1f07c495b6354/README.md#url
+          elsif value.empty?
+            path += "/#{label}@base64/="
+          else
+            path += "/#{label}/#{ERB::Util::url_encode(value)}"
+          end
         end
+
+        path
       end
 
       def request(req_class, registry = nil)
+        validate_no_label_clashes!(registry) if registry
+
         req = req_class.new(@uri)
         req.content_type = Formats::Text::CONTENT_TYPE
-        req.basic_auth(@uri.user, @uri.password) if @uri.user
+        req.basic_auth(@user, @password) if @user
         req.body = Formats::Text.marshal(registry) if registry
 
-        @http.request(req)
+        response = @http.request(req)
+        validate_response!(response)
+
+        response
       end
 
       def synchronize
         @mutex.synchronize { yield }
       end
+
+      def validate_no_basic_auth!(uri)
+        if uri.user || uri.password
+          raise ArgumentError, <<~EOF
+            Setting Basic Auth credentials in the gateway URL is not supported, please call the `basic_auth` method.
+
+            Received username `#{uri.user}` in gateway URL. Instead of passing
+            Basic Auth credentials like this:
+
+            ```
+            push = Prometheus::Client::Push.new(job: "my-job", gateway: "http://user:password@localhost:9091")
+            ```
+
+            please pass them like this:
+
+            ```
+            push = Prometheus::Client::Push.new(job: "my-job", gateway: "http://localhost:9091")
+            push.basic_auth("user", "password")
+            ```
+
+            While URLs do support passing Basic Auth credentials using the
+            `http://user:password@example.com/` syntax, the username and
+            password in that syntax have to follow the usual rules for URL
+            encoding of characters per RFC 3986
+            (https://datatracker.ietf.org/doc/html/rfc3986#section-2.1).
+
+            Rather than place the burden of correctly performing that encoding
+            on users of this gem, we decided to have a separate method for
+            supplying Basic Auth credentials, with no requirement to URL encode
+            the characters in them.
+          EOF
+        end
+      end
+
+      def validate_no_label_clashes!(registry)
+        # There's nothing to check if we don't have a grouping key
+        return if @grouping_key.empty?
+
+        # We could be doing a lot of comparisons, so let's do them against a
+        # set rather than an array
+        grouping_key_labels = @grouping_key.keys.to_set
+
+        registry.metrics.each do |metric|
+          metric.labels.each do |label|
+            if grouping_key_labels.include?(label)
+              raise LabelSetValidator::InvalidLabelSetError,
+                    "label :#{label} from grouping key collides with label of the " \
+                    "same name from metric :#{metric.name} and would overwrite it"
+            end
+          end
+        end
+      end
+
+      def validate_response!(response)
+        status = Integer(response.code)
+        if status >= 300
+          message = "status: #{response.code}, message: #{response.message}, body: #{response.body}"
+          if status <= 399
+            raise HttpRedirectError, message
+          elsif status <= 499
+            raise HttpClientError, message
+          else
+            raise HttpServerError, message
+          end
+        end
+      end
     end
   end
 end
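Two behaviours from the code above, sketched with placeholder values: grouping-key values containing a slash (or empty strings) travel base64-encoded in the URL path, and non-2xx responses now surface as typed errors:

```ruby
push = Prometheus::Client::Push.new(
  job: 'backup',
  grouping_key: { path: '/var/backups' }  # contains "/", so it is sent base64-encoded
)
push.path  # => "/metrics/job/backup/path@base64/L3Zhci9iYWNrdXBz"

begin
  push.replace(Prometheus::Client.registry)
rescue Prometheus::Client::Push::HttpClientError => e
  warn "Pushgateway rejected the request: #{e.message}"
rescue Prometheus::Client::Push::HttpServerError => e
  warn "Pushgateway error: #{e.message}"
end
```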
data/lib/prometheus/client/registry.rb:

@@ -22,7 +22,7 @@ module Prometheus
       name = metric.name
 
       @mutex.synchronize do
-        if exist?(name.to_sym)
+        if @metrics.key?(name.to_sym)
           raise AlreadyRegisteredError, "#{name} has already been registered"
         end
         @metrics[name.to_sym] = metric
@@ -73,15 +73,15 @@ module Prometheus
       end
 
       def exist?(name)
-        @metrics.key?(name)
+        @mutex.synchronize { @metrics.key?(name) }
       end
 
       def get(name)
-        @metrics[name.to_sym]
+        @mutex.synchronize { @metrics[name.to_sym] }
       end
 
       def metrics
-        @metrics.values
+        @mutex.synchronize { @metrics.values }
       end
     end
   end
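With lookups now holding the mutex as well, registration and reads no longer race on the underlying Hash; a small sketch (metric name illustrative, assuming the `AlreadyRegisteredError` raised in the registration hunk above is rescued):

```ruby
require 'prometheus/client'

registry = Prometheus::Client::Registry.new

threads = 4.times.map do
  Thread.new do
    begin
      registry.counter(:jobs_processed_total, docstring: 'Processed jobs')
    rescue Prometheus::Client::Registry::AlreadyRegisteredError
      registry.get(:jobs_processed_total)   # lookup is now also mutex-protected
    end
  end
end
threads.each(&:join)

registry.exist?(:jobs_processed_total)  # => true
```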
data/lib/prometheus/client/summary.rb:

@@ -11,7 +11,12 @@ module Prometheus
         :summary
       end
 
-      # Records a given value.
+      # Records a given value. The recorded value is usually positive
+      # or zero. A negative value is accepted but prevents current
+      # versions of Prometheus from properly detecting counter resets
+      # in the sum of observations. See
+      # https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
+      # for details.
       def observe(value, labels: {})
         base_label_set = label_set_for(labels)
 
@@ -36,15 +41,24 @@ module Prometheus
 
       # Returns all label sets with their values expressed as hashes with their sum/count
       def values
-
+        values = @store.all_values
 
-
+        values.each_with_object({}) do |(label_set, v), acc|
           actual_label_set = label_set.reject{|l| l == :quantile }
           acc[actual_label_set] ||= { "count" => 0.0, "sum" => 0.0 }
           acc[actual_label_set][label_set[:quantile]] = v
         end
       end
 
+      def init_label_set(labels)
+        base_label_set = label_set_for(labels)
+
+        @store.synchronize do
+          @store.set(labels: base_label_set.merge(quantile: "count"), val: 0)
+          @store.set(labels: base_label_set.merge(quantile: "sum"), val: 0)
+        end
+      end
+
       private
 
       def reserved_labels
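As with histograms, a summary's "sum" and "count" can be pre-initialised so the series is exported before the first observation; a sketch with illustrative names:

```ruby
require 'prometheus/client/summary'

payload_bytes = Prometheus::Client::Summary.new(
  :payload_size_bytes,
  docstring: 'Request payload size',
  labels: [:endpoint]
)

payload_bytes.init_label_set(endpoint: '/upload')   # "sum" and "count" exported as 0.0
payload_bytes.observe(1536, labels: { endpoint: '/upload' })

payload_bytes.values
# => { { endpoint: "/upload" } => { "count" => 1.0, "sum" => 1536.0 } }
```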