vinted-prometheus-client-mmap 1.5.0-x86_64-linux

Files changed (57)
  1. checksums.yaml +7 -0
  2. data/README.md +5 -0
  3. data/ext/fast_mmaped_file_rs/Cargo.toml +40 -0
  4. data/ext/fast_mmaped_file_rs/README.md +52 -0
  5. data/ext/fast_mmaped_file_rs/build.rs +7 -0
  6. data/ext/fast_mmaped_file_rs/extconf.rb +28 -0
  7. data/ext/fast_mmaped_file_rs/src/error.rs +174 -0
  8. data/ext/fast_mmaped_file_rs/src/exemplars.rs +25 -0
  9. data/ext/fast_mmaped_file_rs/src/file_entry.rs +1252 -0
  10. data/ext/fast_mmaped_file_rs/src/file_info.rs +240 -0
  11. data/ext/fast_mmaped_file_rs/src/lib.rs +89 -0
  12. data/ext/fast_mmaped_file_rs/src/macros.rs +14 -0
  13. data/ext/fast_mmaped_file_rs/src/map.rs +519 -0
  14. data/ext/fast_mmaped_file_rs/src/metrics.proto +153 -0
  15. data/ext/fast_mmaped_file_rs/src/mmap/inner.rs +775 -0
  16. data/ext/fast_mmaped_file_rs/src/mmap.rs +977 -0
  17. data/ext/fast_mmaped_file_rs/src/raw_entry.rs +547 -0
  18. data/ext/fast_mmaped_file_rs/src/testhelper.rs +222 -0
  19. data/ext/fast_mmaped_file_rs/src/util.rs +140 -0
  20. data/lib/.DS_Store +0 -0
  21. data/lib/2.7/fast_mmaped_file_rs.so +0 -0
  22. data/lib/3.0/fast_mmaped_file_rs.so +0 -0
  23. data/lib/3.1/fast_mmaped_file_rs.so +0 -0
  24. data/lib/3.2/fast_mmaped_file_rs.so +0 -0
  25. data/lib/3.3/fast_mmaped_file_rs.so +0 -0
  26. data/lib/prometheus/.DS_Store +0 -0
  27. data/lib/prometheus/client/configuration.rb +24 -0
  28. data/lib/prometheus/client/counter.rb +27 -0
  29. data/lib/prometheus/client/formats/protobuf.rb +93 -0
  30. data/lib/prometheus/client/formats/text.rb +85 -0
  31. data/lib/prometheus/client/gauge.rb +40 -0
  32. data/lib/prometheus/client/helper/entry_parser.rb +132 -0
  33. data/lib/prometheus/client/helper/file_locker.rb +50 -0
  34. data/lib/prometheus/client/helper/json_parser.rb +23 -0
  35. data/lib/prometheus/client/helper/metrics_processing.rb +45 -0
  36. data/lib/prometheus/client/helper/metrics_representation.rb +51 -0
  37. data/lib/prometheus/client/helper/mmaped_file.rb +64 -0
  38. data/lib/prometheus/client/helper/plain_file.rb +29 -0
  39. data/lib/prometheus/client/histogram.rb +80 -0
  40. data/lib/prometheus/client/label_set_validator.rb +85 -0
  41. data/lib/prometheus/client/metric.rb +80 -0
  42. data/lib/prometheus/client/mmaped_dict.rb +83 -0
  43. data/lib/prometheus/client/mmaped_value.rb +164 -0
  44. data/lib/prometheus/client/page_size.rb +17 -0
  45. data/lib/prometheus/client/push.rb +203 -0
  46. data/lib/prometheus/client/rack/collector.rb +88 -0
  47. data/lib/prometheus/client/rack/exporter.rb +102 -0
  48. data/lib/prometheus/client/registry.rb +65 -0
  49. data/lib/prometheus/client/simple_value.rb +31 -0
  50. data/lib/prometheus/client/summary.rb +69 -0
  51. data/lib/prometheus/client/support/puma.rb +44 -0
  52. data/lib/prometheus/client/support/unicorn.rb +35 -0
  53. data/lib/prometheus/client/uses_value_type.rb +20 -0
  54. data/lib/prometheus/client/version.rb +5 -0
  55. data/lib/prometheus/client.rb +58 -0
  56. data/lib/prometheus.rb +3 -0
  57. metadata +210 -0
data/ext/fast_mmaped_file_rs/src/map.rs
@@ -0,0 +1,519 @@
+use hashbrown::hash_map::RawEntryMut;
+use hashbrown::HashMap;
+use magnus::{exception::*, Error, RArray};
+use std::hash::{BuildHasher, Hash, Hasher};
+use std::mem::size_of;
+
+use crate::error::MmapError;
+use crate::file_entry::{BorrowedData, EntryData, EntryMetadata, FileEntry};
+use crate::file_info::FileInfo;
+use crate::raw_entry::RawEntry;
+use crate::util::read_u32;
+use crate::Result;
+use crate::{err, HEADER_SIZE};
+
+/// A HashMap of JSON strings and their associated metadata.
+/// Used to print metrics in text format.
+///
+/// The map key is the entry's JSON string plus an optional pid string. The
+/// latter allows the map to hold multiple entries for the same JSON string,
+/// one per pid.
+#[derive(Default, Debug)]
+pub struct EntryMap(HashMap<EntryData, EntryMetadata>);
+
+impl EntryMap {
+    /// Construct a new EntryMap.
+    pub fn new() -> Self {
+        Self(HashMap::new())
+    }
+
+    /// Given a list of files, read each one into memory and parse the metrics it contains.
+    pub fn aggregate_files(&mut self, list_of_files: RArray) -> magnus::error::Result<()> {
+        // Pre-allocate the `HashMap` and validate we don't OOM. The C implementation
+        // ignores allocation failures here. We perform this check to avoid potential
+        // panics. We assume ~1,000 entries per file, so 72 KiB allocated per file.
+        self.0
+            .try_reserve(list_of_files.len() * 1024)
+            .map_err(|_| {
+                err!(
+                    no_mem_error(),
+                    "Couldn't allocate for {} memory",
+                    size_of::<FileEntry>() * list_of_files.len() * 1024
+                )
+            })?;
+
+        // We expect file sizes between 4 KiB and 4 MiB. Pre-allocate 16 KiB to
+        // reduce reallocations a bit.
+        let mut buf = Vec::new();
+        buf.try_reserve(16_384)
+            .map_err(|_| err!(no_mem_error(), "Couldn't allocate for {} memory", 16_384))?;
+
+        for item in list_of_files.each() {
+            let params = RArray::from_value(item?).expect("file list was not a Ruby Array");
+            if params.len() != 4 {
+                return Err(err!(
+                    arg_error(),
+                    "wrong number of arguments {} instead of 4",
+                    params.len()
+                ));
+            }
+
+            let params = params.to_value_array::<4>()?;
+
+            let mut file_info = FileInfo::open_from_params(&params)?;
+            file_info.read_from_file(&mut buf)?;
+            self.process_buffer(file_info, &buf)?;
+        }
+        Ok(())
+    }
+
+    /// Consume the `EntryMap` and convert the key/value pairs into `FileEntry`
+    /// objects, sorting them by their JSON strings.
+    pub fn into_sorted(self) -> Result<Vec<FileEntry>> {
+        let mut sorted = Vec::new();
+
+        // To match the behavior of the C version, pre-allocate the entries
+        // and check for allocation failure. Generally idiomatic Rust would
+        // `collect` the iterator into a new `Vec` in place, but this panics
+        // if it can't allocate and we want to continue execution in that
+        // scenario.
+        if sorted.try_reserve_exact(self.0.len()).is_err() {
+            return Err(MmapError::OutOfMemory(
+                self.0.len() * size_of::<FileEntry>(),
+            ));
+        }
+
+        sorted.extend(
+            self.0
+                .into_iter()
+                .map(|(data, meta)| FileEntry { data, meta }),
+        );
+
+        sorted.sort_unstable_by(|x, y| x.data.cmp(&y.data));
+
+        Ok(sorted)
+    }
+
+    /// Check if the `EntryMap` already contains the JSON string.
+    /// If yes, update the associated value; if not, insert the
+    /// entry into the map.
+    pub fn merge_or_store(&mut self, data: BorrowedData, meta: EntryMetadata) -> Result<()> {
+        // Manually hash the `BorrowedData` and perform an equality check on the
+        // key. This allows us to perform the comparison without allocating a
+        // new `EntryData` that may not be needed.
+        let mut state = self.0.hasher().build_hasher();
+        data.hash(&mut state);
+        let hash = state.finish();
+
+        match self.0.raw_entry_mut().from_hash(hash, |k| k == &data) {
+            RawEntryMut::Vacant(entry) => {
+                // Allocate a new `EntryData` as the JSON/pid combination is
+                // not present in the map.
+                let owned = EntryData::try_from(data)?;
+                entry.insert(owned, meta);
+            }
+            RawEntryMut::Occupied(mut entry) => {
+                let existing = entry.get_mut();
+                existing.merge(&meta);
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Parse metrics data from a `.db` file and store it in the `EntryMap`.
+    fn process_buffer(&mut self, file_info: FileInfo, source: &[u8]) -> Result<()> {
+        if source.len() < HEADER_SIZE {
+            // Nothing to read, OK.
+            return Ok(());
+        }
+
+        // CAST: no-op on 32-bit, widening on 64-bit.
+        let used = read_u32(source, 0)? as usize;
+
+        if used > source.len() {
+            return Err(MmapError::PromParsing(format!(
+                "source file {} corrupted, used {used} > file size {}",
+                file_info.path.display(),
+                source.len()
+            )));
+        }
+
+        let mut pos = HEADER_SIZE;
+
+        while pos + size_of::<u32>() < used {
+            let raw_entry: RawEntry;
+
+            if file_info.type_.to_string() == "exemplar" {
+                raw_entry = RawEntry::from_slice_exemplar(&source[pos..used])?;
+
+                if pos + raw_entry.total_len_exemplar() > used {
+                    return Err(MmapError::PromParsing(format!(
+                        "source file {} corrupted, used {used} < stored data length {}",
+                        file_info.path.display(),
+                        pos + raw_entry.total_len_exemplar()
+                    )));
+                }
+
+                pos += raw_entry.total_len_exemplar();
+            } else {
+                raw_entry = RawEntry::from_slice(&source[pos..used])?;
+
+                if pos + raw_entry.total_len() > used {
+                    return Err(MmapError::PromParsing(format!(
+                        "source file {} corrupted, used {used} < stored data length {}",
+                        file_info.path.display(),
+                        pos + raw_entry.total_len()
+                    )));
+                }
+
+                pos += raw_entry.total_len();
+            }
+
+            let meta = EntryMetadata::new(&raw_entry, &file_info)?;
+            let data = BorrowedData::new(&raw_entry, &file_info, meta.is_pid_significant())?;
+
+            self.merge_or_store(data, meta)?;
+        }
+
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use magnus::Symbol;
+    use std::mem;
+
+    use super::*;
+    use crate::file_entry::FileEntry;
+    use crate::testhelper::{self, TestFile};
+
+    impl EntryData {
+        /// A helper function for tests to convert owned data to references.
+        fn as_borrowed(&self) -> BorrowedData {
+            BorrowedData {
+                json: &self.json,
+                pid: self.pid.as_deref(),
+            }
+        }
+    }
+
+    #[test]
+    fn test_into_sorted() {
+        let _cleanup = unsafe { magnus::embed::init() };
+        let ruby = magnus::Ruby::get().unwrap();
+        crate::init(&ruby).unwrap();
+
+        let entries = vec![
+            FileEntry {
+                data: EntryData {
+                    json: "zzzzzz".to_string(),
+                    pid: Some("worker-0_0".to_string()),
+                },
+                meta: EntryMetadata {
+                    multiprocess_mode: Symbol::new("max"),
+                    type_: Symbol::new("gauge"),
+                    value: Some(1.0),
+                    ex: None,
+                },
+            },
+            FileEntry {
+                data: EntryData {
+                    json: "zzz".to_string(),
+                    pid: Some("worker-0_0".to_string()),
+                },
+                meta: EntryMetadata {
+                    multiprocess_mode: Symbol::new("max"),
+                    type_: Symbol::new("gauge"),
+                    value: Some(1.0),
+                    ex: None,
+                },
+            },
+            FileEntry {
+                data: EntryData {
+                    json: "zzzaaa".to_string(),
+                    pid: Some("worker-0_0".to_string()),
+                },
+                meta: EntryMetadata {
+                    multiprocess_mode: Symbol::new("max"),
+                    type_: Symbol::new("gauge"),
+                    value: Some(1.0),
+                    ex: None,
+                },
+            },
+            FileEntry {
+                data: EntryData {
+                    json: "aaa".to_string(),
+                    pid: Some("worker-0_0".to_string()),
+                },
+                meta: EntryMetadata {
+                    multiprocess_mode: Symbol::new("max"),
+                    type_: Symbol::new("gauge"),
+                    value: Some(1.0),
+                    ex: None,
+                },
+            },
+            FileEntry {
+                data: EntryData {
+                    json: "ooo".to_string(),
+                    pid: Some("worker-1_0".to_string()),
+                },
+                meta: EntryMetadata {
+                    multiprocess_mode: Symbol::new("all"),
+                    type_: Symbol::new("gauge"),
+                    value: Some(1.0),
+                    ex: None,
+                },
+            },
+            FileEntry {
+                data: EntryData {
+                    json: "ooo".to_string(),
+                    pid: Some("worker-0_0".to_string()),
+                },
+                meta: EntryMetadata {
+                    multiprocess_mode: Symbol::new("all"),
+                    type_: Symbol::new("gauge"),
+                    value: Some(1.0),
+                    ex: None,
+                },
+            },
+        ];
+
+        let mut map = EntryMap::new();
+
+        for entry in entries {
+            map.0.insert(entry.data, entry.meta);
+        }
+
+        let result = map.into_sorted();
+        assert!(result.is_ok());
+        let sorted = result.unwrap();
+        assert_eq!(sorted.len(), 6);
+        assert_eq!(sorted[0].data.json, "aaa");
+        assert_eq!(sorted[1].data.json, "ooo");
+        assert_eq!(sorted[1].data.pid.as_deref(), Some("worker-0_0"));
+        assert_eq!(sorted[2].data.json, "ooo");
+        assert_eq!(sorted[2].data.pid.as_deref(), Some("worker-1_0"));
+        assert_eq!(sorted[3].data.json, "zzz");
+        assert_eq!(sorted[4].data.json, "zzzaaa");
+        assert_eq!(sorted[5].data.json, "zzzzzz");
+    }
+
+    #[test]
+    fn test_merge_or_store() {
+        let _cleanup = unsafe { magnus::embed::init() };
+        let ruby = magnus::Ruby::get().unwrap();
+        crate::init(&ruby).unwrap();
+
+        let key = "foobar";
+
+        let starting_entry = FileEntry {
+            data: EntryData {
+                json: key.to_string(),
+                pid: Some("worker-0_0".to_string()),
+            },
+            meta: EntryMetadata {
+                multiprocess_mode: Symbol::new("all"),
+                type_: Symbol::new("gauge"),
+                value: Some(1.0),
+                ex: None,
+            },
+        };
+
+        let matching_entry = FileEntry {
+            data: EntryData {
+                json: key.to_string(),
+                pid: Some("worker-0_0".to_string()),
+            },
+            meta: EntryMetadata {
+                multiprocess_mode: Symbol::new("all"),
+                type_: Symbol::new("gauge"),
+                value: Some(5.0),
+                ex: None,
+            },
+        };
+
+        let same_key_different_worker = FileEntry {
+            data: EntryData {
+                json: key.to_string(),
+                pid: Some("worker-1_0".to_string()),
+            },
+            meta: EntryMetadata {
+                multiprocess_mode: Symbol::new("all"),
+                type_: Symbol::new("gauge"),
+                value: Some(100.0),
+                ex: None,
+            },
+        };
+
+        let unmatched_entry = FileEntry {
+            data: EntryData {
+                json: "another key".to_string(),
+                pid: Some("worker-0_0".to_string()),
+            },
+            meta: EntryMetadata {
+                multiprocess_mode: Symbol::new("all"),
+                type_: Symbol::new("gauge"),
+                value: Some(100.0),
+                ex: None,
+            },
+        };
+
+        let mut map = EntryMap::new();
+
+        map.0
+            .insert(starting_entry.data.clone(), starting_entry.meta.clone());
+
+        let matching_borrowed = matching_entry.data.as_borrowed();
+        map.merge_or_store(matching_borrowed, matching_entry.meta)
+            .unwrap();
+
+        assert_eq!(
+            5.0,
+            map.0.get(&starting_entry.data).unwrap().value.unwrap(),
+            "value updated"
+        );
+        assert_eq!(1, map.0.len(), "no entry added");
+
+        let same_key_different_worker_borrowed = same_key_different_worker.data.as_borrowed();
+        map.merge_or_store(
+            same_key_different_worker_borrowed,
+            same_key_different_worker.meta,
+        )
+        .unwrap();
+
+        assert_eq!(
+            5.0,
+            map.0.get(&starting_entry.data).unwrap().value.unwrap(),
+            "value unchanged"
+        );
+
+        assert_eq!(2, map.0.len(), "additional entry added");
+
+        let unmatched_entry_borrowed = unmatched_entry.data.as_borrowed();
+        map.merge_or_store(unmatched_entry_borrowed, unmatched_entry.meta)
+            .unwrap();
+
+        assert_eq!(
+            5.0,
+            map.0.get(&starting_entry.data).unwrap().value.unwrap(),
+            "value unchanged"
+        );
+        assert_eq!(3, map.0.len(), "entry added");
+    }
+
+    #[test]
+    fn test_process_buffer() {
+        struct TestCase {
+            name: &'static str,
+            json: &'static [&'static str],
+            values: &'static [f64],
+            used: Option<u32>,
+            expected_ct: usize,
+            expected_err: Option<MmapError>,
+        }
+
+        let _cleanup = unsafe { magnus::embed::init() };
+        let ruby = magnus::Ruby::get().unwrap();
+        crate::init(&ruby).unwrap();
+
+        let tc = vec![
+            TestCase {
+                name: "single entry",
+                json: &[
+                    r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
+                ],
+                values: &[1.0],
+                used: None,
+                expected_ct: 1,
+                expected_err: None,
+            },
+            TestCase {
+                name: "multiple entries",
+                json: &[
+                    r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
+                    r#"["second_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
+                ],
+                values: &[1.0, 2.0],
+                used: None,
+                expected_ct: 2,
+                expected_err: None,
+            },
+            TestCase {
+                name: "empty",
+                json: &[],
+                values: &[],
+                used: None,
+                expected_ct: 0,
+                expected_err: None,
+            },
+            TestCase {
+                name: "used too long",
+                json: &[
+                    r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
+                ],
+                values: &[1.0],
+                used: Some(9999),
+                expected_ct: 0,
+                expected_err: Some(MmapError::PromParsing(String::new())),
+            },
+            TestCase {
+                name: "used too short",
+                json: &[
+                    r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
+                ],
+                values: &[1.0],
+                used: Some(15),
+                expected_ct: 0,
+                expected_err: Some(MmapError::out_of_bounds(88, 7)),
+            },
+        ];
+
+        for case in tc {
+            let name = case.name;
+
+            let input_bytes = testhelper::entries_to_db(case.json, case.values, case.used);
+
+            let TestFile {
+                file,
+                path,
+                dir: _dir,
+            } = TestFile::new(&input_bytes);
+
+            let info = FileInfo {
+                file,
+                path,
+                len: case.json.len(),
+                multiprocess_mode: Symbol::new("max"),
+                type_: Symbol::new("gauge"),
+                pid: "worker-1".to_string(),
+            };
+
+            let mut map = EntryMap::new();
+            let result = map.process_buffer(info, &input_bytes);
+
+            assert_eq!(case.expected_ct, map.0.len(), "test case: {name} - count");
+
+            if let Some(expected_err) = case.expected_err {
+                // Validate we have the right enum type for the error. Error
+                // messages contain the temp dir path and can't be predicted
+                // exactly.
+                assert_eq!(
+                    mem::discriminant(&expected_err),
+                    mem::discriminant(&result.unwrap_err()),
+                    "test case: {name} - failure"
+                );
+            } else {
+                assert_eq!(Ok(()), result, "test case: {name} - success");

+                assert_eq!(
+                    case.json.len(),
+                    map.0.len(),
+                    "test case: {name} - all entries captured"
+                );
+            }
+        }
+    }
+}
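
The interesting trick in `merge_or_store` above is probing the map with a manually computed hash, so the owned `EntryData` key is only allocated on a miss. A minimal sketch of the same pattern, assuming hashbrown 0.14 (where `raw_entry_mut` is available by default) and a hypothetical `HashMap<String, u64>` counter standing in for the metric types:

use hashbrown::hash_map::RawEntryMut;
use hashbrown::HashMap;
use std::hash::{BuildHasher, Hash, Hasher};

// Insert-or-update without allocating an owned String on the hit path.
fn upsert(map: &mut HashMap<String, u64>, key: &str) {
    // Hash the borrowed &str with the map's own hasher; &str and String
    // hash identically, so probing with this hash is sound.
    let mut state = map.hasher().build_hasher();
    key.hash(&mut state);
    let hash = state.finish();

    match map.raw_entry_mut().from_hash(hash, |k| k == key) {
        // Miss: only now pay for the owned-key allocation.
        RawEntryMut::Vacant(entry) => {
            entry.insert(key.to_string(), 1);
        }
        // Hit: merge into the existing value; no allocation needed.
        RawEntryMut::Occupied(mut entry) => *entry.get_mut() += 1,
    }
}

fn main() {
    let mut map = HashMap::new();
    upsert(&mut map, "foo");
    upsert(&mut map, "foo");
    assert_eq!(map["foo"], 2);
}

The same reasoning explains why `BorrowedData` and `EntryData` must hash identically: the lookup closure compares a borrowed key against owned ones.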
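`process_buffer` above also encodes the on-disk contract of the `.db` files: the first u32 is a "used bytes" watermark, followed by variable-length entries whose lengths are validated against that watermark before the cursor advances. Below is the shape of that loop reduced to a self-contained sketch; the 8-byte header and the 4-byte-length-prefix record layout are assumptions for illustration only (the real layout lives in raw_entry.rs):

use std::mem::size_of;

const HEADER_SIZE: usize = 8; // assumed: 4-byte used counter plus padding

// Return each record's payload, refusing to read past the `used` watermark
// stored in the first four bytes of the buffer.
fn scan(source: &[u8]) -> Result<Vec<&[u8]>, String> {
    if source.len() < HEADER_SIZE {
        return Ok(Vec::new()); // nothing to read, OK
    }
    let used = u32::from_ne_bytes(source[..4].try_into().unwrap()) as usize;
    if used > source.len() {
        return Err(format!("used {used} > file size {}", source.len()));
    }

    let mut out = Vec::new();
    let mut pos = HEADER_SIZE;
    while pos + size_of::<u32>() < used {
        // Hypothetical record: u32 payload length, then the payload itself.
        let len = u32::from_ne_bytes(source[pos..pos + 4].try_into().unwrap()) as usize;
        let total = size_of::<u32>() + len;
        if pos + total > used {
            return Err(format!("used {used} < stored data length {}", pos + total));
        }
        out.push(&source[pos + 4..pos + total]);
        pos += total;
    }
    Ok(out)
}

fn main() {
    let mut buf = Vec::new();
    buf.extend_from_slice(&15u32.to_ne_bytes()); // used = 8 + 4 + 3 = 15 bytes
    buf.extend_from_slice(&[0; 4]);              // header padding
    buf.extend_from_slice(&3u32.to_ne_bytes());  // record length
    buf.extend_from_slice(b"abc");               // record payload
    assert_eq!(scan(&buf).unwrap(), vec![b"abc".as_slice()]);
}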
data/ext/fast_mmaped_file_rs/src/metrics.proto
@@ -0,0 +1,153 @@
+// Copyright 2013 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto2";
+
+package io.prometheus.client;
+option java_package = "io.prometheus.client";
+option go_package = "github.com/prometheus/client_model/go;io_prometheus_client";
+
+import "google/protobuf/timestamp.proto";
+
+message LabelPair {
+  optional string name = 1;
+  optional string value = 2;
+}
+
+enum MetricType {
+  // COUNTER must use the Metric field "counter".
+  COUNTER = 0;
+  // GAUGE must use the Metric field "gauge".
+  GAUGE = 1;
+  // SUMMARY must use the Metric field "summary".
+  SUMMARY = 2;
+  // UNTYPED must use the Metric field "untyped".
+  UNTYPED = 3;
+  // HISTOGRAM must use the Metric field "histogram".
+  HISTOGRAM = 4;
+  // GAUGE_HISTOGRAM must use the Metric field "histogram".
+  GAUGE_HISTOGRAM = 5;
+}
+
+message Gauge {
+  optional double value = 1;
+}
+
+message Counter {
+  optional double value = 1;
+  optional Exemplar exemplar = 2;
+
+  optional google.protobuf.Timestamp created_timestamp = 3;
+}
+
+message Quantile {
+  optional double quantile = 1;
+  optional double value = 2;
+}
+
+message Summary {
+  optional uint64 sample_count = 1;
+  optional double sample_sum = 2;
+  repeated Quantile quantile = 3;
+
+  optional google.protobuf.Timestamp created_timestamp = 4;
+}
+
+message Untyped {
+  optional double value = 1;
+}
+
+message Histogram {
+  optional uint64 sample_count = 1;
+  optional double sample_count_float = 4; // Overrides sample_count if > 0.
+  optional double sample_sum = 2;
+  // Buckets for the conventional histogram.
+  repeated Bucket bucket = 3; // Ordered in increasing order of upper_bound, +Inf bucket is optional.
+
+  optional google.protobuf.Timestamp created_timestamp = 15;
+
+  // Everything below here is for native histograms (also known as sparse histograms).
+  // Native histograms are an experimental feature without stability guarantees.
+
+  // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
+  // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
+  // then each power of two is divided into 2^n logarithmic buckets.
+  // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
+  // In the future, more bucket schemas may be added using numbers < -4 or > 8.
+  optional sint32 schema = 5;
+  optional double zero_threshold = 6; // Breadth of the zero bucket.
+  optional uint64 zero_count = 7; // Count in zero bucket.
+  optional double zero_count_float = 8; // Overrides sb_zero_count if > 0.
+
+  // Negative buckets for the native histogram.
+  repeated BucketSpan negative_span = 9;
+  // Use either "negative_delta" or "negative_count", the former for
+  // regular histograms with integer counts, the latter for float
+  // histograms.
+  repeated sint64 negative_delta = 10; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+  repeated double negative_count = 11; // Absolute count of each bucket.
+
+  // Positive buckets for the native histogram.
+  // Use a no-op span (offset 0, length 0) for a native histogram without any
+  // observations yet and with a zero_threshold of 0. Otherwise, it would be
+  // indistinguishable from a classic histogram.
+  repeated BucketSpan positive_span = 12;
+  // Use either "positive_delta" or "positive_count", the former for
+  // regular histograms with integer counts, the latter for float
+  // histograms.
+  repeated sint64 positive_delta = 13; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+  repeated double positive_count = 14; // Absolute count of each bucket.
+}
+
+// A Bucket of a conventional histogram, each of which is treated as
+// an individual counter-like time series by Prometheus.
+message Bucket {
+  optional uint64 cumulative_count = 1; // Cumulative in increasing order.
+  optional double cumulative_count_float = 4; // Overrides cumulative_count if > 0.
+  optional double upper_bound = 2; // Inclusive.
+  optional Exemplar exemplar = 3;
+}
+
+// A BucketSpan defines a number of consecutive buckets in a native
+// histogram with their offset. Logically, it would be more
+// straightforward to include the bucket counts in the Span. However,
+// the protobuf representation is more compact in the way the data is
+// structured here (with all the buckets in a single array separate
+// from the Spans).
+message BucketSpan {
+  optional sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
+  optional uint32 length = 2; // Length of consecutive buckets.
+}
+
+message Exemplar {
+  repeated LabelPair label = 1;
+  optional double value = 2;
+  optional google.protobuf.Timestamp timestamp = 3; // OpenMetrics-style.
+}
+
+message Metric {
+  repeated LabelPair label = 1;
+  optional Gauge gauge = 2;
+  optional Counter counter = 3;
+  optional Summary summary = 4;
+  optional Untyped untyped = 5;
+  optional Histogram histogram = 7;
+  optional int64 timestamp_ms = 6;
+}
+
+message MetricFamily {
+  optional string name = 1;
+  optional string help = 2;
+  optional MetricType type = 3;
+  repeated Metric metric = 4;
+}
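
The `schema` comment in Histogram above packs a lot into one formula: each power of two is split into 2^n logarithmic buckets, so consecutive bucket boundaries grow by a factor of 2^(2^-n). A quick numeric sanity check (a Rust sketch, chosen only because the rest of this extension is Rust):

fn main() {
    let schema: i32 = 3; // valid range per the comment: -4 <= schema <= 8
    let factor = 2f64.powf(2f64.powi(-schema)); // 2^(2^-3) ≈ 1.0905
    let mut bound = 1.0; // 1 is a bucket boundary for every schema
    for i in 1..=(1 << schema) {
        bound *= factor;
        println!("boundary {i} above 1.0: {bound:.6}");
    }
    // After 2^schema = 8 buckets the boundary lands on the next power of two.
    assert!((bound - 2.0).abs() < 1e-9);
}

Higher schemas give finer resolution (schema 8 splits each power of two into 256 buckets); negative schemas go the other way, with schema -4 covering a factor of 65536 per bucket.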