vinted-prometheus-client-mmap 1.2.1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (52)
  1. checksums.yaml +7 -0
  2. data/README.md +2 -0
  3. data/ext/fast_mmaped_file_rs/Cargo.toml +40 -0
  4. data/ext/fast_mmaped_file_rs/README.md +52 -0
  5. data/ext/fast_mmaped_file_rs/build.rs +7 -0
  6. data/ext/fast_mmaped_file_rs/extconf.rb +28 -0
  7. data/ext/fast_mmaped_file_rs/src/error.rs +174 -0
  8. data/ext/fast_mmaped_file_rs/src/exemplars.rs +25 -0
  9. data/ext/fast_mmaped_file_rs/src/file_entry.rs +1190 -0
  10. data/ext/fast_mmaped_file_rs/src/file_info.rs +240 -0
  11. data/ext/fast_mmaped_file_rs/src/lib.rs +87 -0
  12. data/ext/fast_mmaped_file_rs/src/macros.rs +14 -0
  13. data/ext/fast_mmaped_file_rs/src/map.rs +492 -0
  14. data/ext/fast_mmaped_file_rs/src/metrics.proto +153 -0
  15. data/ext/fast_mmaped_file_rs/src/mmap/inner.rs +704 -0
  16. data/ext/fast_mmaped_file_rs/src/mmap.rs +896 -0
  17. data/ext/fast_mmaped_file_rs/src/raw_entry.rs +473 -0
  18. data/ext/fast_mmaped_file_rs/src/testhelper.rs +222 -0
  19. data/ext/fast_mmaped_file_rs/src/util.rs +121 -0
  20. data/lib/.DS_Store +0 -0
  21. data/lib/prometheus/.DS_Store +0 -0
  22. data/lib/prometheus/client/configuration.rb +23 -0
  23. data/lib/prometheus/client/counter.rb +27 -0
  24. data/lib/prometheus/client/formats/protobuf.rb +92 -0
  25. data/lib/prometheus/client/formats/text.rb +85 -0
  26. data/lib/prometheus/client/gauge.rb +40 -0
  27. data/lib/prometheus/client/helper/entry_parser.rb +132 -0
  28. data/lib/prometheus/client/helper/file_locker.rb +50 -0
  29. data/lib/prometheus/client/helper/json_parser.rb +23 -0
  30. data/lib/prometheus/client/helper/metrics_processing.rb +45 -0
  31. data/lib/prometheus/client/helper/metrics_representation.rb +51 -0
  32. data/lib/prometheus/client/helper/mmaped_file.rb +64 -0
  33. data/lib/prometheus/client/helper/plain_file.rb +29 -0
  34. data/lib/prometheus/client/histogram.rb +80 -0
  35. data/lib/prometheus/client/label_set_validator.rb +85 -0
  36. data/lib/prometheus/client/metric.rb +80 -0
  37. data/lib/prometheus/client/mmaped_dict.rb +79 -0
  38. data/lib/prometheus/client/mmaped_value.rb +158 -0
  39. data/lib/prometheus/client/page_size.rb +17 -0
  40. data/lib/prometheus/client/push.rb +203 -0
  41. data/lib/prometheus/client/rack/collector.rb +88 -0
  42. data/lib/prometheus/client/rack/exporter.rb +102 -0
  43. data/lib/prometheus/client/registry.rb +65 -0
  44. data/lib/prometheus/client/simple_value.rb +31 -0
  45. data/lib/prometheus/client/summary.rb +69 -0
  46. data/lib/prometheus/client/support/puma.rb +44 -0
  47. data/lib/prometheus/client/support/unicorn.rb +35 -0
  48. data/lib/prometheus/client/uses_value_type.rb +20 -0
  49. data/lib/prometheus/client/version.rb +5 -0
  50. data/lib/prometheus/client.rb +58 -0
  51. data/lib/prometheus.rb +3 -0
  52. metadata +203 -0
@@ -0,0 +1,492 @@
1
+ use hashbrown::hash_map::RawEntryMut;
2
+ use hashbrown::HashMap;
3
+ use magnus::{exception::*, Error, RArray};
4
+ use std::hash::{BuildHasher, Hash, Hasher};
5
+ use std::mem::size_of;
6
+
7
+ use crate::error::MmapError;
8
+ use crate::file_entry::{BorrowedData, EntryData, EntryMetadata, FileEntry};
9
+ use crate::file_info::FileInfo;
10
+ use crate::raw_entry::RawEntry;
11
+ use crate::util::read_u32;
12
+ use crate::Result;
13
+ use crate::{err, HEADER_SIZE};
14
+
15
/// A HashMap of JSON strings and their associated metadata.
/// Used to print metrics in text format.
///
/// The map key is the entry's JSON string and an optional pid string. The latter
/// allows us to have multiple entries on the map for multiple pids using the
/// same string.
///
/// Newtype over `HashMap<EntryData, EntryMetadata>`; duplicate keys are
/// combined via `merge_or_store` rather than overwritten.
#[derive(Default, Debug)]
pub struct EntryMap(HashMap<EntryData, EntryMetadata>);
23
+
24
impl EntryMap {
    /// Construct a new, empty EntryMap.
    pub fn new() -> Self {
        Self(HashMap::new())
    }

    /// Given a list of files, read each one into memory and parse the metrics it contains.
    ///
    /// Each element of `list_of_files` must itself be a Ruby Array of exactly
    /// 4 values, which are forwarded to `FileInfo::open_from_params`.
    /// NOTE(review): the meaning of the 4 params is defined in file_info.rs —
    /// confirm there before changing this.
    ///
    /// Returns a Ruby-level error on allocation failure, malformed arguments,
    /// or any I/O/parse failure from the per-file processing.
    pub fn aggregate_files(&mut self, list_of_files: RArray) -> magnus::error::Result<()> {
        // Pre-allocate the `HashMap` and validate we don't OOM. The C implementation
        // ignores allocation failures here. We perform this check to avoid potential
        // panics. We assume ~1,000 entries per file, so 72 KiB allocated per file.
        self.0
            .try_reserve(list_of_files.len() * 1024)
            .map_err(|_| {
                err!(
                    no_mem_error(),
                    "Couldn't allocate for {} memory",
                    size_of::<FileEntry>() * list_of_files.len() * 1024
                )
            })?;

        // We expect file sizes between 4KiB and 4MiB. Pre-allocate 16KiB to reduce reallocations
        // a bit.
        let mut buf = Vec::new();
        buf.try_reserve(16_384)
            .map_err(|_| err!(no_mem_error(), "Couldn't allocate for {} memory", 16_384))?;

        for item in list_of_files.each() {
            // PANIC: only if Ruby hands us a non-Array element; treated as a
            // caller bug rather than a recoverable error.
            let params = RArray::from_value(item?).expect("file list was not a Ruby Array");
            if params.len() != 4 {
                return Err(err!(
                    arg_error(),
                    "wrong number of arguments {} instead of 4",
                    params.len()
                ));
            }

            let params = params.to_value_array::<4>()?;

            let mut file_info = FileInfo::open_from_params(&params)?;
            // NOTE(review): `buf` is reused across iterations — presumably
            // `read_from_file` resets/overwrites it; verify in file_info.rs.
            file_info.read_from_file(&mut buf)?;
            self.process_buffer(file_info, &buf)?;
        }
        Ok(())
    }

    /// Consume the `EntryMap` and convert the key/value into`FileEntry`
    /// objects, sorting them by their JSON strings.
    ///
    /// Returns `MmapError::OutOfMemory` if the output `Vec` cannot be
    /// allocated.
    pub fn into_sorted(self) -> Result<Vec<FileEntry>> {
        let mut sorted = Vec::new();

        // To match the behavior of the C version, pre-allocate the entries
        // and check for allocation failure. Generally idiomatic Rust would
        // `collect` the iterator into a new `Vec` in place, but this panics
        // if it can't allocate and we want to continue execution in that
        // scenario.
        if sorted.try_reserve_exact(self.0.len()).is_err() {
            return Err(MmapError::OutOfMemory(
                self.0.len() * size_of::<FileEntry>(),
            ));
        }

        sorted.extend(
            self.0
                .into_iter()
                .map(|(data, meta)| FileEntry { data, meta }),
        );

        // Order comes from `EntryData`'s `Ord` impl (see file_entry.rs).
        // Unstable sort is fine: map keys are unique, so no equal elements.
        sorted.sort_unstable_by(|x, y| x.data.cmp(&y.data));

        Ok(sorted)
    }

    /// Check if the `EntryMap` already contains the JSON string.
    /// If yes, update the associated value, if not insert the
    /// entry into the map.
    ///
    /// Errors only if allocating the owned `EntryData` for a new key fails.
    pub fn merge_or_store(&mut self, data: BorrowedData, meta: EntryMetadata) -> Result<()> {
        // Manually hash the `BorrowedData` and perform an equality check on the
        // key. This allows us to perform the comparison without allocating a
        // new `EntryData` that may not be needed.
        // NOTE(review): this relies on `BorrowedData` hashing identically to
        // `EntryData` and on `EntryData == BorrowedData` comparison — confirm
        // in file_entry.rs if either type changes.
        let mut state = self.0.hasher().build_hasher();
        data.hash(&mut state);
        let hash = state.finish();

        match self.0.raw_entry_mut().from_hash(hash, |k| k == &data) {
            RawEntryMut::Vacant(entry) => {
                // Allocate a new `EntryData` as the JSON/pid combination is
                // not present in the map.
                let owned = EntryData::try_from(data)?;
                entry.insert(owned, meta);
            }
            RawEntryMut::Occupied(mut entry) => {
                // Key already present: fold the new sample into the existing
                // metadata (merge semantics live in `EntryMetadata::merge`).
                let existing = entry.get_mut();
                existing.merge(&meta);
            }
        }

        Ok(())
    }

    /// Parse metrics data from a `.db` file and store in the `EntryMap`.
    ///
    /// `source` holds the full file contents; the leading u32 of the header
    /// is the number of bytes in use ("used"), which bounds the entry scan.
    fn process_buffer(&mut self, file_info: FileInfo, source: &[u8]) -> Result<()> {
        if source.len() < HEADER_SIZE {
            // Nothing to read, OK.
            return Ok(());
        }

        // CAST: no-op on 32-bit, widening on 64-bit.
        let used = read_u32(source, 0)? as usize;

        // A `used` watermark beyond the buffer means the file is corrupt.
        if used > source.len() {
            return Err(MmapError::PromParsing(format!(
                "source file {} corrupted, used {used} > file size {}",
                file_info.path.display(),
                source.len()
            )));
        }

        let mut pos = HEADER_SIZE;

        // Walk entries while at least a u32 length prefix remains before the
        // `used` watermark.
        while pos + size_of::<u32>() < used {
            let raw_entry = RawEntry::from_slice(&source[pos..used])?;

            // An entry claiming to extend past `used` indicates corruption.
            if pos + raw_entry.total_len() > used {
                return Err(MmapError::PromParsing(format!(
                    "source file {} corrupted, used {used} < stored data length {}",
                    file_info.path.display(),
                    pos + raw_entry.total_len()
                )));
            }

            let meta = EntryMetadata::new(&raw_entry, &file_info)?;
            let data = BorrowedData::new(&raw_entry, &file_info, meta.is_pid_significant())?;

            self.merge_or_store(data, meta)?;

            pos += raw_entry.total_len();
        }

        Ok(())
    }
}
166
+
167
#[cfg(test)]
mod test {
    use magnus::Symbol;
    use std::mem;

    use super::*;
    use crate::file_entry::FileEntry;
    use crate::testhelper::{self, TestFile};

    impl EntryData {
        /// A helper function for tests to convert owned data to references.
        fn as_borrowed(&self) -> BorrowedData {
            BorrowedData {
                json: &self.json,
                pid: self.pid.as_deref(),
            }
        }
    }

    /// Entries must come out of `into_sorted` ordered by JSON string, and by
    /// pid when the JSON strings are equal (see the two "ooo" entries).
    #[test]
    fn test_into_sorted() {
        // Boot an embedded Ruby VM so `Symbol::new` and other magnus calls
        // below work; `_cleanup` tears it down when dropped.
        let _cleanup = unsafe { magnus::embed::init() };
        let ruby = magnus::Ruby::get().unwrap();
        crate::init(&ruby).unwrap();

        let entries = vec![
            FileEntry {
                data: EntryData {
                    json: "zzzzzz".to_string(),
                    pid: Some("worker-0_0".to_string()),
                },
                meta: EntryMetadata {
                    multiprocess_mode: Symbol::new("max"),
                    type_: Symbol::new("gauge"),
                    value: 1.0,
                },
            },
            FileEntry {
                data: EntryData {
                    json: "zzz".to_string(),
                    pid: Some("worker-0_0".to_string()),
                },
                meta: EntryMetadata {
                    multiprocess_mode: Symbol::new("max"),
                    type_: Symbol::new("gauge"),
                    value: 1.0,
                },
            },
            FileEntry {
                data: EntryData {
                    json: "zzzaaa".to_string(),
                    pid: Some("worker-0_0".to_string()),
                },
                meta: EntryMetadata {
                    multiprocess_mode: Symbol::new("max"),
                    type_: Symbol::new("gauge"),
                    value: 1.0,
                },
            },
            FileEntry {
                data: EntryData {
                    json: "aaa".to_string(),
                    pid: Some("worker-0_0".to_string()),
                },
                meta: EntryMetadata {
                    multiprocess_mode: Symbol::new("max"),
                    type_: Symbol::new("gauge"),
                    value: 1.0,
                },
            },
            FileEntry {
                data: EntryData {
                    json: "ooo".to_string(),
                    pid: Some("worker-1_0".to_string()),
                },
                meta: EntryMetadata {
                    multiprocess_mode: Symbol::new("all"),
                    type_: Symbol::new("gauge"),
                    value: 1.0,
                },
            },
            FileEntry {
                data: EntryData {
                    json: "ooo".to_string(),
                    pid: Some("worker-0_0".to_string()),
                },
                meta: EntryMetadata {
                    multiprocess_mode: Symbol::new("all"),
                    type_: Symbol::new("gauge"),
                    value: 1.0,
                },
            },
        ];

        let mut map = EntryMap::new();

        // Insert directly into the inner map, bypassing merge logic — this
        // test only exercises ordering.
        for entry in entries {
            map.0.insert(entry.data, entry.meta);
        }

        let result = map.into_sorted();
        assert!(result.is_ok());
        let sorted = result.unwrap();
        assert_eq!(sorted.len(), 6);
        assert_eq!(sorted[0].data.json, "aaa");
        assert_eq!(sorted[1].data.json, "ooo");
        assert_eq!(sorted[1].data.pid.as_deref(), Some("worker-0_0"));
        assert_eq!(sorted[2].data.json, "ooo");
        assert_eq!(sorted[2].data.pid.as_deref(), Some("worker-1_0"));
        assert_eq!(sorted[3].data.json, "zzz");
        assert_eq!(sorted[4].data.json, "zzzaaa");
        assert_eq!(sorted[5].data.json, "zzzzzz");
    }

    /// `merge_or_store` must merge same-key entries in place and insert
    /// entries with a different pid or JSON string as new map entries.
    #[test]
    fn test_merge_or_store() {
        // Boot an embedded Ruby VM so `Symbol::new` works in this test.
        let _cleanup = unsafe { magnus::embed::init() };
        let ruby = magnus::Ruby::get().unwrap();
        crate::init(&ruby).unwrap();

        let key = "foobar";

        let starting_entry = FileEntry {
            data: EntryData {
                json: key.to_string(),
                pid: Some("worker-0_0".to_string()),
            },
            meta: EntryMetadata {
                multiprocess_mode: Symbol::new("all"),
                type_: Symbol::new("gauge"),
                value: 1.0,
            },
        };

        let matching_entry = FileEntry {
            data: EntryData {
                json: key.to_string(),
                pid: Some("worker-0_0".to_string()),
            },
            meta: EntryMetadata {
                multiprocess_mode: Symbol::new("all"),
                type_: Symbol::new("gauge"),
                value: 5.0,
            },
        };

        let same_key_different_worker = FileEntry {
            data: EntryData {
                json: key.to_string(),
                pid: Some("worker-1_0".to_string()),
            },
            meta: EntryMetadata {
                multiprocess_mode: Symbol::new("all"),
                type_: Symbol::new("gauge"),
                value: 100.0,
            },
        };

        let unmatched_entry = FileEntry {
            data: EntryData {
                json: "another key".to_string(),
                pid: Some("worker-0_0".to_string()),
            },
            meta: EntryMetadata {
                multiprocess_mode: Symbol::new("all"),
                type_: Symbol::new("gauge"),
                value: 1.0,
            },
        };

        let mut map = EntryMap::new();

        map.0
            .insert(starting_entry.data.clone(), starting_entry.meta.clone());

        // Same JSON + same pid: merged, value updated, no new entry.
        let matching_borrowed = matching_entry.data.as_borrowed();
        map.merge_or_store(matching_borrowed, matching_entry.meta)
            .unwrap();

        assert_eq!(
            5.0,
            map.0.get(&starting_entry.data).unwrap().value,
            "value updated"
        );
        assert_eq!(1, map.0.len(), "no entry added");

        // Same JSON, different pid: stored as a distinct entry.
        let same_key_different_worker_borrowed = same_key_different_worker.data.as_borrowed();
        map.merge_or_store(
            same_key_different_worker_borrowed,
            same_key_different_worker.meta,
        )
        .unwrap();

        assert_eq!(
            5.0,
            map.0.get(&starting_entry.data).unwrap().value,
            "value unchanged"
        );

        assert_eq!(2, map.0.len(), "additional entry added");

        // Different JSON string: stored as a distinct entry.
        let unmatched_entry_borrowed = unmatched_entry.data.as_borrowed();
        map.merge_or_store(unmatched_entry_borrowed, unmatched_entry.meta)
            .unwrap();

        assert_eq!(
            5.0,
            map.0.get(&starting_entry.data).unwrap().value,
            "value unchanged"
        );
        assert_eq!(3, map.0.len(), "entry added");
    }

    /// Table-driven test of `process_buffer`: well-formed inputs populate the
    /// map; a too-long or too-short `used` header yields a parse error.
    #[test]
    fn test_process_buffer() {
        struct TestCase {
            name: &'static str,
            json: &'static [&'static str],
            values: &'static [f64],
            // Overrides the `used` header written by `entries_to_db` when Some.
            used: Option<u32>,
            expected_ct: usize,
            expected_err: Option<MmapError>,
        }

        // Boot an embedded Ruby VM so `Symbol::new` works in this test.
        let _cleanup = unsafe { magnus::embed::init() };
        let ruby = magnus::Ruby::get().unwrap();
        crate::init(&ruby).unwrap();

        let tc = vec![
            TestCase {
                name: "single entry",
                json: &[
                    r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
                ],
                values: &[1.0],
                used: None,
                expected_ct: 1,
                expected_err: None,
            },
            TestCase {
                name: "multiple entries",
                json: &[
                    r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
                    r#"["second_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
                ],
                values: &[1.0, 2.0],
                used: None,
                expected_ct: 2,
                expected_err: None,
            },
            TestCase {
                name: "empty",
                json: &[],
                values: &[],
                used: None,
                expected_ct: 0,
                expected_err: None,
            },
            TestCase {
                name: "used too long",
                json: &[
                    r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
                ],
                values: &[1.0],
                used: Some(9999),
                expected_ct: 0,
                expected_err: Some(MmapError::PromParsing(String::new())),
            },
            TestCase {
                name: "used too short",
                json: &[
                    r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
                ],
                values: &[1.0],
                used: Some(15),
                expected_ct: 0,
                expected_err: Some(MmapError::out_of_bounds(88, 7)),
            },
        ];

        for case in tc {
            let name = case.name;

            let input_bytes = testhelper::entries_to_db(case.json, case.values, case.used);

            let TestFile {
                file,
                path,
                dir: _dir,
            } = TestFile::new(&input_bytes);

            let info = FileInfo {
                file,
                path,
                len: case.json.len(),
                multiprocess_mode: Symbol::new("max"),
                type_: Symbol::new("gauge"),
                pid: "worker-1".to_string(),
            };

            let mut map = EntryMap::new();
            let result = map.process_buffer(info, &input_bytes);

            assert_eq!(case.expected_ct, map.0.len(), "test case: {name} - count");

            if let Some(expected_err) = case.expected_err {
                // Validate we have the right enum type for the error. Error
                // messages contain the temp dir path and can't be predicted
                // exactly.
                assert_eq!(
                    mem::discriminant(&expected_err),
                    mem::discriminant(&result.unwrap_err()),
                    "test case: {name} - failure"
                );
            } else {
                assert_eq!(Ok(()), result, "test case: {name} - success");

                assert_eq!(
                    case.json.len(),
                    map.0.len(),
                    "test case: {name} - all entries captured"
                );
            }
        }
    }
}
@@ -0,0 +1,153 @@
1
+ // Copyright 2013 Prometheus Team
2
+ // Licensed under the Apache License, Version 2.0 (the "License");
3
+ // you may not use this file except in compliance with the License.
4
+ // You may obtain a copy of the License at
5
+ //
6
+ // http://www.apache.org/licenses/LICENSE-2.0
7
+ //
8
+ // Unless required by applicable law or agreed to in writing, software
9
+ // distributed under the License is distributed on an "AS IS" BASIS,
10
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ // See the License for the specific language governing permissions and
12
+ // limitations under the License.
13
+
14
+ syntax = "proto2";
15
+
16
+ package io.prometheus.client;
17
+ option java_package = "io.prometheus.client";
18
+ option go_package = "github.com/prometheus/client_model/go;io_prometheus_client";
19
+
20
+ import "google/protobuf/timestamp.proto";
21
+
22
+ message LabelPair {
23
+ optional string name = 1;
24
+ optional string value = 2;
25
+ }
26
+
27
+ enum MetricType {
28
+ // COUNTER must use the Metric field "counter".
29
+ COUNTER = 0;
30
+ // GAUGE must use the Metric field "gauge".
31
+ GAUGE = 1;
32
+ // SUMMARY must use the Metric field "summary".
33
+ SUMMARY = 2;
34
+ // UNTYPED must use the Metric field "untyped".
35
+ UNTYPED = 3;
36
+ // HISTOGRAM must use the Metric field "histogram".
37
+ HISTOGRAM = 4;
38
+ // GAUGE_HISTOGRAM must use the Metric field "histogram".
39
+ GAUGE_HISTOGRAM = 5;
40
+ }
41
+
42
+ message Gauge {
43
+ optional double value = 1;
44
+ }
45
+
46
+ message Counter {
47
+ optional double value = 1;
48
+ optional Exemplar exemplar = 2;
49
+
50
+ optional google.protobuf.Timestamp created_timestamp = 3;
51
+ }
52
+
53
+ message Quantile {
54
+ optional double quantile = 1;
55
+ optional double value = 2;
56
+ }
57
+
58
+ message Summary {
59
+ optional uint64 sample_count = 1;
60
+ optional double sample_sum = 2;
61
+ repeated Quantile quantile = 3;
62
+
63
+ optional google.protobuf.Timestamp created_timestamp = 4;
64
+ }
65
+
66
+ message Untyped {
67
+ optional double value = 1;
68
+ }
69
+
70
+ message Histogram {
71
+ optional uint64 sample_count = 1;
72
+ optional double sample_count_float = 4; // Overrides sample_count if > 0.
73
+ optional double sample_sum = 2;
74
+ // Buckets for the conventional histogram.
75
+ repeated Bucket bucket = 3; // Ordered in increasing order of upper_bound, +Inf bucket is optional.
76
+
77
+ optional google.protobuf.Timestamp created_timestamp = 15;
78
+
79
+ // Everything below here is for native histograms (also known as sparse histograms).
80
+ // Native histograms are an experimental feature without stability guarantees.
81
+
82
+ // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
83
+ // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
84
+ // then each power of two is divided into 2^n logarithmic buckets.
85
+ // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
86
+ // In the future, more bucket schemas may be added using numbers < -4 or > 8.
87
+ optional sint32 schema = 5;
88
+ optional double zero_threshold = 6; // Breadth of the zero bucket.
89
+ optional uint64 zero_count = 7; // Count in zero bucket.
90
+ optional double zero_count_float = 8; // Overrides sb_zero_count if > 0.
91
+
92
+ // Negative buckets for the native histogram.
93
+ repeated BucketSpan negative_span = 9;
94
+ // Use either "negative_delta" or "negative_count", the former for
95
+ // regular histograms with integer counts, the latter for float
96
+ // histograms.
97
+ repeated sint64 negative_delta = 10; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
98
+ repeated double negative_count = 11; // Absolute count of each bucket.
99
+
100
+ // Positive buckets for the native histogram.
101
+ // Use a no-op span (offset 0, length 0) for a native histogram without any
102
+ // observations yet and with a zero_threshold of 0. Otherwise, it would be
103
+ // indistinguishable from a classic histogram.
104
+ repeated BucketSpan positive_span = 12;
105
+ // Use either "positive_delta" or "positive_count", the former for
106
+ // regular histograms with integer counts, the latter for float
107
+ // histograms.
108
+ repeated sint64 positive_delta = 13; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
109
+ repeated double positive_count = 14; // Absolute count of each bucket.
110
+ }
111
+
112
+ // A Bucket of a conventional histogram, each of which is treated as
113
+ // an individual counter-like time series by Prometheus.
114
+ message Bucket {
115
+ optional uint64 cumulative_count = 1; // Cumulative in increasing order.
116
+ optional double cumulative_count_float = 4; // Overrides cumulative_count if > 0.
117
+ optional double upper_bound = 2; // Inclusive.
118
+ optional Exemplar exemplar = 3;
119
+ }
120
+
121
+ // A BucketSpan defines a number of consecutive buckets in a native
122
+ // histogram with their offset. Logically, it would be more
123
+ // straightforward to include the bucket counts in the Span. However,
124
+ // the protobuf representation is more compact in the way the data is
125
+ // structured here (with all the buckets in a single array separate
126
+ // from the Spans).
127
+ message BucketSpan {
128
+ optional sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
129
+ optional uint32 length = 2; // Length of consecutive buckets.
130
+ }
131
+
132
+ message Exemplar {
133
+ repeated LabelPair label = 1;
134
+ optional double value = 2;
135
+ optional google.protobuf.Timestamp timestamp = 3; // OpenMetrics-style.
136
+ }
137
+
138
+ message Metric {
139
+ repeated LabelPair label = 1;
140
+ optional Gauge gauge = 2;
141
+ optional Counter counter = 3;
142
+ optional Summary summary = 4;
143
+ optional Untyped untyped = 5;
144
+ optional Histogram histogram = 7;
145
+ optional int64 timestamp_ms = 6;
146
+ }
147
+
148
+ message MetricFamily {
149
+ optional string name = 1;
150
+ optional string help = 2;
151
+ optional MetricType type = 3;
152
+ repeated Metric metric = 4;
153
+ }