prometheus-client 0.9.0 → 0.10.0.pre.alpha.1
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/README.md +230 -19
- data/lib/prometheus/client.rb +5 -0
- data/lib/prometheus/client/config.rb +15 -0
- data/lib/prometheus/client/counter.rb +2 -8
- data/lib/prometheus/client/data_stores/README.md +306 -0
- data/lib/prometheus/client/data_stores/direct_file_store.rb +313 -0
- data/lib/prometheus/client/data_stores/single_threaded.rb +58 -0
- data/lib/prometheus/client/data_stores/synchronized.rb +64 -0
- data/lib/prometheus/client/formats/text.rb +8 -14
- data/lib/prometheus/client/gauge.rb +6 -12
- data/lib/prometheus/client/histogram.rb +82 -34
- data/lib/prometheus/client/label_set_validator.rb +17 -13
- data/lib/prometheus/client/metric.rb +41 -22
- data/lib/prometheus/client/registry.rb +27 -9
- data/lib/prometheus/client/summary.rb +26 -35
- data/lib/prometheus/client/version.rb +1 -1
- data/lib/prometheus/middleware/collector.rb +32 -29
- metadata +36 -12
@@ -0,0 +1,313 @@
|
|
1
|
+
require 'concurrent'
|
2
|
+
require 'fileutils'
|
3
|
+
require "cgi"
|
4
|
+
|
5
|
+
require 'monitor'

module Prometheus
  module Client
    module DataStores
      # Stores data in binary files, one file per process and per metric.
      # This is generally the recommended store to use to deal with pre-fork servers and
      # other "multi-process" scenarios.
      #
      # Each process will get a file for a metric, and it will manage its contents by
      # storing keys next to binary-encoded Floats, and keeping track of the offsets of
      # those Floats, to be able to update them directly as they increase.
      #
      # When exporting metrics, the process that gets scraped by Prometheus will find
      # all the files that apply to a metric, read their contents, and aggregate them
      # (generally that means SUMming the values for each labelset).
      #
      # In order to do this, each Metric needs an `:aggregation` setting, specifying how
      # to aggregate the multiple possible values we can get for each labelset. By default,
      # they are `SUM`med, which is what most use cases call for (counters and histograms,
      # for example).
      # However, for Gauges, it's possible to set `MAX` or `MIN` as aggregation, to get
      # the highest value of all the processes / threads.
      class DirectFileStore
        class InvalidStoreSettingsError < StandardError; end
        AGGREGATION_MODES = [MAX = :max, MIN = :min, SUM = :sum].freeze
        DEFAULT_METRIC_SETTINGS = { aggregation: SUM }.freeze

        # @param dir [String] directory where the per-process metric files live.
        #   Created (including intermediate directories) if it doesn't exist yet.
        def initialize(dir:)
          @store_settings = { dir: dir }
          FileUtils.mkdir_p(dir)
        end

        # Returns the `MetricStore` for one metric, after validating its settings.
        #
        # @raise [InvalidStoreSettingsError] if the settings don't contain a valid
        #   `:aggregation` key, or contain any key other than `:aggregation`.
        def for_metric(metric_name, metric_type:, metric_settings: {})
          settings = DEFAULT_METRIC_SETTINGS.merge(metric_settings)
          validate_metric_settings(settings)

          MetricStore.new(metric_name: metric_name,
                          store_settings: @store_settings,
                          metric_settings: settings)
        end

        private

        def validate_metric_settings(metric_settings)
          unless metric_settings.has_key?(:aggregation) &&
            AGGREGATION_MODES.include?(metric_settings[:aggregation])
            raise InvalidStoreSettingsError,
                  "Metrics need a valid :aggregation key"
          end

          unless (metric_settings.keys - [:aggregation]).empty?
            raise InvalidStoreSettingsError,
                  "Only :aggregation setting can be specified"
          end
        end

        # Per-metric store. Writes go to this process's file; reads for export
        # (`all_values`) aggregate across every process's file for the metric.
        class MetricStore
          attr_reader :metric_name, :store_settings

          def initialize(metric_name:, store_settings:, metric_settings:)
            @metric_name = metric_name
            @store_settings = store_settings
            @values_aggregation_mode = metric_settings[:aggregation]

            # Monitor (stdlib) is a reentrant, exclusive lock. The store only
            # ever needed exclusive locking (the previous
            # Concurrent::ReentrantReadWriteLock was used solely through
            # `with_write_lock`), so this is behaviorally equivalent and drops
            # the third-party dependency on this path.
            @lock = Monitor.new
          end

          # Synchronize is used to do a multi-process Mutex, when incrementing multiple
          # values at once, so that the other process, reading the file for export, doesn't
          # get incomplete increments.
          #
          # `in_process_sync`, instead, is just used so that two threads don't increment
          # the same value and get a context switch between read and write leading to an
          # inconsistency
          def synchronize
            in_process_sync do
              internal_store.with_file_lock do
                yield
              end
            end
          end

          def set(labels:, val:)
            in_process_sync do
              internal_store.write_value(store_key(labels), val.to_f)
            end
          end

          def increment(labels:, by: 1)
            key = store_key(labels)
            in_process_sync do
              value = internal_store.read_value(key)
              internal_store.write_value(key, value + by.to_f)
            end
          end

          def get(labels:)
            in_process_sync do
              internal_store.read_value(store_key(labels))
            end
          end

          # Reads every process's file for this metric and aggregates the values
          # observed for each labelset, according to `@values_aggregation_mode`.
          def all_values
            stores_data = Hash.new { |hash, key| hash[key] = [] }

            # There's no need to call `synchronize` here. We're opening a second handle to
            # the file, and `flock`ing it, which prevents inconsistent reads
            stores_for_metric.each do |file_path|
              begin
                store = FileMappedDict.new(file_path, true)
                store.all_values.each do |(labelset_qs, v)|
                  # Labels come as a query string, and CGI::parse returns arrays for each key
                  # "foo=bar&x=y" => { "foo" => ["bar"], "x" => ["y"] }
                  # Turn the keys back into symbols, and remove the arrays
                  label_set = CGI::parse(labelset_qs).map do |k, vs|
                    [k.to_sym, vs.first]
                  end.to_h

                  stores_data[label_set] << v
                end
              ensure
                store.close if store
              end
            end

            # Aggregate all the different values for each label_set
            stores_data.each_with_object({}) do |(label_set, values), acc|
              acc[label_set] = aggregate_values(values)
            end
          end

          private

          def in_process_sync
            @lock.synchronize { yield }
          end

          # Labelsets are serialized as URL query strings, which keeps both keys
          # and values safely escaped inside a single flat string.
          def store_key(labels)
            labels.map { |k, v| "#{CGI::escape(k.to_s)}=#{CGI::escape(v.to_s)}" }.join('&')
          end

          def internal_store
            @internal_store ||= FileMappedDict.new(filemap_filename)
          end

          # Filename for this metric's backing file (one per process).
          def filemap_filename
            filename = "metric_#{ metric_name }___#{ process_id }.bin"
            File.join(@store_settings[:dir], filename)
          end

          def stores_for_metric
            Dir.glob(File.join(@store_settings[:dir], "metric_#{ metric_name }___*"))
          end

          def process_id
            Process.pid
          end

          def aggregate_values(values)
            case @values_aggregation_mode
            when SUM then values.sum
            when MAX then values.max
            when MIN then values.min
            else
              raise InvalidStoreSettingsError,
                    "Invalid Aggregation Mode: #{ @values_aggregation_mode }"
            end
          end
        end

        private_constant :MetricStore

        # A dict of doubles, backed by a file we access directly as a byte array.
        #
        # The file starts with a 4 byte int, indicating how much of it is used.
        # Then 4 bytes of padding.
        # There's then a number of entries, consisting of a 4 byte int which is the
        # size of the next field, a utf-8 encoded string key, padding to an 8 byte
        # alignment, and then a 8 byte float which is the value.
        class FileMappedDict
          INITIAL_FILE_SIZE = 1024 * 1024

          attr_reader :capacity, :used, :positions

          def initialize(filename, readonly = false)
            @positions = {}
            @used = 0

            open_file(filename, readonly)
            @used = @f.read(4).unpack('l')[0] if @capacity > 0

            if @used > 0
              # File already has data. Read the existing values
              with_file_lock do
                read_all_values.each do |key, _, pos|
                  @positions[key] = pos
                end
              end
            else
              # File is empty. Init the `used` counter, if we're in write mode
              if !readonly
                @used = 8
                @f.seek(0)
                @f.write([@used].pack('l'))
              end
            end
          end

          # Returns a list of [key, value] pairs, holding the file lock while reading.
          def all_values
            with_file_lock do
              read_all_values.map { |k, v, p| [k, v] }
            end
          end

          def read_value(key)
            init_value(key) unless @positions.has_key?(key)

            pos = @positions[key]
            @f.seek(pos)
            @f.read(8).unpack('d')[0]
          end

          def write_value(key, value)
            init_value(key) unless @positions.has_key?(key)

            pos = @positions[key]
            @f.seek(pos)
            @f.write([value].pack('d'))
            @f.flush
          end

          def close
            @f.close
          end

          def with_file_lock
            @f.flock(File::LOCK_EX)
            yield
          ensure
            @f.flock(File::LOCK_UN)
          end

          private

          def open_file(filename, readonly)
            mode = if readonly
                     # Binary mode ("rb", not "r") so that platforms with text-mode
                     # IO (Windows) don't newline-translate our packed doubles.
                     "rb"
                   elsif File.exist?(filename)
                     "r+b"
                   else
                     "w+b"
                   end

            @f = File.open(filename, mode)
            if @f.size == 0 && !readonly
              resize_file(INITIAL_FILE_SIZE)
            end
            @capacity = @f.size
          end

          def resize_file(new_capacity)
            @f.truncate(new_capacity)
          end

          # Initialize a value. Lock must be held by caller.
          def init_value(key)
            # Pad to be 8-byte aligned.
            padded = key + (' ' * (8 - (key.length + 4) % 8))
            value = [padded.length, padded, 0.0].pack("lA#{padded.length}d")
            while @used + value.length > @capacity
              @capacity *= 2
              resize_file(@capacity)
            end
            @f.seek(@used)
            @f.write(value)
            @used += value.length
            @f.seek(0)
            @f.write([@used].pack('l'))
            @f.flush
            @positions[key] = @used - 8
          end

          # Yield (key, value, pos). No locking is performed.
          def read_all_values
            @f.seek(8)
            values = []
            while @f.pos < @used
              padded_len = @f.read(4).unpack('l')[0]
              encoded = @f.read(padded_len).unpack("A#{padded_len}")[0]
              value = @f.read(8).unpack('d')[0]
              values << [encoded.strip, value, @f.pos - 8]
            end
            values
          end
        end
      end
    end
  end
end
|
312
|
+
|
313
|
+
|
@@ -0,0 +1,58 @@
|
|
1
|
+
require 'concurrent'
|
2
|
+
|
3
|
+
module Prometheus
  module Client
    module DataStores
      # The simplest possible store: one plain Hash per metric.
      #
      # There is no synchronization whatsoever, which makes this the fastest
      # option for strictly single-threaded processes, but it must absolutely
      # not be used when multiple threads observe metrics concurrently.
      class SingleThreaded
        class InvalidStoreSettingsError < StandardError; end

        # Builds the per-metric store. This store accepts no settings at all.
        def for_metric(metric_name, metric_type:, metric_settings: {})
          # Neither `metric_type` nor `metric_settings` are relevant here.
          validate_metric_settings(metric_settings: metric_settings)
          MetricStore.new
        end

        private

        def validate_metric_settings(metric_settings:)
          return if metric_settings.empty?

          raise InvalidStoreSettingsError,
                "SingleThreaded doesn't allow any metric_settings"
        end

        # Hash-backed store; unknown labelsets default to 0.0.
        class MetricStore
          def initialize
            @store = Hash.new { |hash, key| hash[key] = 0.0 }
          end

          # No-op "synchronization": just runs the block.
          def synchronize
            yield
          end

          def set(labels:, val:)
            @store[labels] = val.to_f
          end

          def increment(labels:, by: 1)
            @store[labels] += by
          end

          def get(labels:)
            @store[labels]
          end

          # Snapshot of the current values (a shallow copy).
          def all_values
            @store.dup
          end
        end

        private_constant :MetricStore
      end
    end
  end
end
@@ -0,0 +1,64 @@
|
|
1
|
+
require 'concurrent'
|
2
|
+
|
3
|
+
require 'monitor'

module Prometheus
  module Client
    module DataStores
      # Stores all the data in simple hashes, one per metric. Each of these metrics
      # synchronizes access to their hash, but multiple metrics can run observations
      # concurrently.
      class Synchronized
        class InvalidStoreSettingsError < StandardError; end

        # Builds the per-metric store. This store accepts no settings at all.
        def for_metric(metric_name, metric_type:, metric_settings: {})
          # We don't need `metric_type` or `metric_settings` for this particular store
          validate_metric_settings(metric_settings: metric_settings)
          MetricStore.new
        end

        private

        def validate_metric_settings(metric_settings:)
          unless metric_settings.empty?
            raise InvalidStoreSettingsError,
                  "Synchronized doesn't allow any metric_settings"
          end
        end

        # Hash-backed store where every operation runs under a reentrant,
        # exclusive lock.
        class MetricStore
          def initialize
            @internal_store = Hash.new { |hash, key| hash[key] = 0.0 }
            # Monitor (stdlib) is a reentrant exclusive lock. The previous
            # Concurrent::ReentrantReadWriteLock was only ever used through
            # `with_write_lock`, so Monitor provides the same guarantees
            # (including reentrancy for nested `synchronize` calls) without
            # the third-party dependency.
            @lock = Monitor.new
          end

          def synchronize
            @lock.synchronize { yield }
          end

          def set(labels:, val:)
            synchronize do
              @internal_store[labels] = val.to_f
            end
          end

          def increment(labels:, by: 1)
            synchronize do
              @internal_store[labels] += by
            end
          end

          def get(labels:)
            synchronize do
              @internal_store[labels]
            end
          end

          # Snapshot of the current values (a shallow copy), taken under the lock.
          def all_values
            synchronize { @internal_store.dup }
          end
        end

        private_constant :MetricStore
      end
    end
  end
end
@@ -40,37 +40,31 @@ module Prometheus
|
|
40
40
|
private
|
41
41
|
|
42
42
|
# Renders one metric sample, dispatching on the metric type.
# Summaries and histograms expand into several serialized lines;
# every other type becomes a single line.
def representation(metric, label_set, value, &block)
  case metric.type
  when :summary
    summary(metric.name, label_set, value, &block)
  when :histogram
    histogram(metric.name, label_set, value, &block)
  else
    yield metric(metric.name, labels(label_set), value)
  end
end
|
53
51
|
|
54
52
|
# Serializes a summary: one `_sum` line and one `_count` line, both carrying
# the labelset `set`. `value` is keyed by the strings "sum" and "count".
def summary(name, set, value)
  serialized_labels = labels(set)
  yield metric("#{name}_sum", serialized_labels, value["sum"])
  yield metric("#{name}_count", serialized_labels, value["count"])
end
|
63
57
|
|
64
58
|
# Serializes a histogram: one `_bucket` line per upper bound (the "sum"
# entry in `value` is not a bucket and is skipped), followed by `_sum` and
# `_count` lines. The "+Inf" bucket doubles as the total observation count.
def histogram(name, set, value)
  bucket_name = "#{name}_bucket"
  value.each do |upper_bound, count|
    next if upper_bound == "sum"
    yield metric(bucket_name, labels(set.merge(le: upper_bound)), count)
  end

  serialized_labels = labels(set)
  yield metric("#{name}_sum", serialized_labels, value["sum"])
  yield metric("#{name}_count", serialized_labels, value["+Inf"])
end
|
75
69
|
|
76
70
|
def metric(name, labels, value)
|