vinted-prometheus-client-mmap 1.5.0-x86_64-linux
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/README.md +5 -0
- data/ext/fast_mmaped_file_rs/Cargo.toml +40 -0
- data/ext/fast_mmaped_file_rs/README.md +52 -0
- data/ext/fast_mmaped_file_rs/build.rs +7 -0
- data/ext/fast_mmaped_file_rs/extconf.rb +28 -0
- data/ext/fast_mmaped_file_rs/src/error.rs +174 -0
- data/ext/fast_mmaped_file_rs/src/exemplars.rs +25 -0
- data/ext/fast_mmaped_file_rs/src/file_entry.rs +1252 -0
- data/ext/fast_mmaped_file_rs/src/file_info.rs +240 -0
- data/ext/fast_mmaped_file_rs/src/lib.rs +89 -0
- data/ext/fast_mmaped_file_rs/src/macros.rs +14 -0
- data/ext/fast_mmaped_file_rs/src/map.rs +519 -0
- data/ext/fast_mmaped_file_rs/src/metrics.proto +153 -0
- data/ext/fast_mmaped_file_rs/src/mmap/inner.rs +775 -0
- data/ext/fast_mmaped_file_rs/src/mmap.rs +977 -0
- data/ext/fast_mmaped_file_rs/src/raw_entry.rs +547 -0
- data/ext/fast_mmaped_file_rs/src/testhelper.rs +222 -0
- data/ext/fast_mmaped_file_rs/src/util.rs +140 -0
- data/lib/.DS_Store +0 -0
- data/lib/2.7/fast_mmaped_file_rs.so +0 -0
- data/lib/3.0/fast_mmaped_file_rs.so +0 -0
- data/lib/3.1/fast_mmaped_file_rs.so +0 -0
- data/lib/3.2/fast_mmaped_file_rs.so +0 -0
- data/lib/3.3/fast_mmaped_file_rs.so +0 -0
- data/lib/prometheus/.DS_Store +0 -0
- data/lib/prometheus/client/configuration.rb +24 -0
- data/lib/prometheus/client/counter.rb +27 -0
- data/lib/prometheus/client/formats/protobuf.rb +93 -0
- data/lib/prometheus/client/formats/text.rb +85 -0
- data/lib/prometheus/client/gauge.rb +40 -0
- data/lib/prometheus/client/helper/entry_parser.rb +132 -0
- data/lib/prometheus/client/helper/file_locker.rb +50 -0
- data/lib/prometheus/client/helper/json_parser.rb +23 -0
- data/lib/prometheus/client/helper/metrics_processing.rb +45 -0
- data/lib/prometheus/client/helper/metrics_representation.rb +51 -0
- data/lib/prometheus/client/helper/mmaped_file.rb +64 -0
- data/lib/prometheus/client/helper/plain_file.rb +29 -0
- data/lib/prometheus/client/histogram.rb +80 -0
- data/lib/prometheus/client/label_set_validator.rb +85 -0
- data/lib/prometheus/client/metric.rb +80 -0
- data/lib/prometheus/client/mmaped_dict.rb +83 -0
- data/lib/prometheus/client/mmaped_value.rb +164 -0
- data/lib/prometheus/client/page_size.rb +17 -0
- data/lib/prometheus/client/push.rb +203 -0
- data/lib/prometheus/client/rack/collector.rb +88 -0
- data/lib/prometheus/client/rack/exporter.rb +102 -0
- data/lib/prometheus/client/registry.rb +65 -0
- data/lib/prometheus/client/simple_value.rb +31 -0
- data/lib/prometheus/client/summary.rb +69 -0
- data/lib/prometheus/client/support/puma.rb +44 -0
- data/lib/prometheus/client/support/unicorn.rb +35 -0
- data/lib/prometheus/client/uses_value_type.rb +20 -0
- data/lib/prometheus/client/version.rb +5 -0
- data/lib/prometheus/client.rb +58 -0
- data/lib/prometheus.rb +3 -0
- metadata +210 -0
data/ext/fast_mmaped_file_rs/src/testhelper.rs
ADDED
@@ -0,0 +1,222 @@
+use bstr::{BString, B};
+use std::fs::File;
+use std::io::{Read, Seek, Write};
+use std::path::PathBuf;
+use tempfile::{tempdir, TempDir};
+
+use crate::raw_entry::RawEntry;
+use crate::HEADER_SIZE;
+
+#[derive(PartialEq, Default, Debug)]
+pub struct TestEntry {
+    pub header: u32,
+    pub json: &'static str,
+    pub padding_len: usize,
+    pub value: f64,
+}
+
+impl TestEntry {
+    pub fn new(json: &'static str, value: f64) -> Self {
+        TestEntry {
+            header: json.len() as u32,
+            json,
+            padding_len: RawEntry::padding_len(json.len()),
+            value,
+        }
+    }
+
+    pub fn as_bytes(&self) -> Vec<u8> {
+        [
+            B(&self.header.to_ne_bytes()),
+            self.json.as_bytes(),
+            &vec![b' '; self.padding_len],
+            B(&self.value.to_ne_bytes()),
+        ]
+        .concat()
+    }
+    pub fn as_bstring(&self) -> BString {
+        [
+            B(&self.header.to_ne_bytes()),
+            self.json.as_bytes(),
+            &vec![b' '; self.padding_len],
+            B(&self.value.to_ne_bytes()),
+        ]
+        .concat()
+        .into()
+    }
+
+    pub fn as_bytes_no_header(&self) -> BString {
+        [
+            self.json.as_bytes(),
+            &vec![b' '; self.padding_len],
+            B(&self.value.to_ne_bytes()),
+        ]
+        .concat()
+        .into()
+    }
+}
+
+/// Format the data for a `.db` file.
+/// Optional header value can be used to set an invalid `used` size.
+pub fn entries_to_db(entries: &[&'static str], values: &[f64], header: Option<u32>) -> Vec<u8> {
+    let mut out = Vec::new();
+
+    let entry_bytes: Vec<_> = entries
+        .iter()
+        .zip(values)
+        .flat_map(|(e, val)| TestEntry::new(e, *val).as_bytes())
+        .collect();
+
+    let used = match header {
+        Some(u) => u,
+        None => (entry_bytes.len() + HEADER_SIZE) as u32,
+    };
+
+    out.extend(used.to_ne_bytes());
+    out.extend([0x0u8; 4]); // Padding.
+    out.extend(entry_bytes);
+
+    out
+}
+
+/// A temporary file, path, and dir for use with testing.
+#[derive(Debug)]
+pub struct TestFile {
+    pub file: File,
+    pub path: PathBuf,
+    pub dir: TempDir,
+}
+
+impl TestFile {
+    pub fn new(file_data: &[u8]) -> TestFile {
+        let dir = tempdir().unwrap();
+        let path = dir.path().join("test.db");
+        let mut file = File::options()
+            .create(true)
+            .read(true)
+            .write(true)
+            .open(&path)
+            .unwrap();
+
+        file.write_all(file_data).unwrap();
+        file.sync_all().unwrap();
+        file.rewind().unwrap();
+
+        // We need to keep `dir` in scope so it doesn't drop before the files it
+        // contains, which may prevent cleanup.
+        TestFile { file, path, dir }
+    }
+}
+
+mod test {
+    use super::*;
+
+    #[test]
+    fn test_entry_new() {
+        let json = "foobar";
+        let value = 1.0f64;
+        let expected = TestEntry {
+            header: 6,
+            json,
+            padding_len: 6,
+            value,
+        };
+
+        let actual = TestEntry::new(json, value);
+        assert_eq!(expected, actual);
+    }
+
+    #[test]
+    fn test_entry_bytes() {
+        let json = "foobar";
+        let value = 1.0f64;
+        let expected = [
+            &6u32.to_ne_bytes(),
+            B(json),
+            &[b' '; 6],
+            &value.to_ne_bytes(),
+        ]
+        .concat();
+
+        let actual = TestEntry::new(json, value).as_bstring();
+        assert_eq!(expected, actual);
+    }
+
+    #[test]
+    fn test_entry_bytes_no_header() {
+        let json = "foobar";
+        let value = 1.0f64;
+        let expected = [B(json), &[b' '; 6], &value.to_ne_bytes()].concat();
+
+        let actual = TestEntry::new(json, value).as_bytes_no_header();
+        assert_eq!(expected, actual);
+    }
+
+    #[test]
+    fn test_entries_to_db_header_correct() {
+        let json = &["foobar", "qux"];
+        let values = &[1.0, 2.0];
+
+        let out = entries_to_db(json, values, None);
+
+        assert_eq!(48u32.to_ne_bytes(), out[0..4], "used set correctly");
+        assert_eq!([0u8; 4], out[4..8], "padding set");
+        assert_eq!(
+            TestEntry::new(json[0], values[0]).as_bytes(),
+            out[8..32],
+            "first entry matches"
+        );
+        assert_eq!(
+            TestEntry::new(json[1], values[1]).as_bytes(),
+            out[32..48],
+            "second entry matches"
+        );
+    }
+
+    #[test]
+    fn test_entries_to_db_header_wrong() {
+        let json = &["foobar", "qux"];
+        let values = &[1.0, 2.0];
+
+        const WRONG_USED: u32 = 1000;
+        let out = entries_to_db(json, values, Some(WRONG_USED));
+
+        assert_eq!(
+            WRONG_USED.to_ne_bytes(),
+            out[0..4],
+            "used set to value requested"
+        );
+        assert_eq!([0u8; 4], out[4..8], "padding set");
+        assert_eq!(
+            TestEntry::new(json[0], values[0]).as_bytes(),
+            out[8..32],
+            "first entry matches"
+        );
+        assert_eq!(
+            TestEntry::new(json[1], values[1]).as_bytes(),
+            out[32..48],
+            "second entry matches"
+        );
+    }
+
+    #[test]
+    fn test_file() {
+        let mut test_file = TestFile::new(b"foobar");
+        let stat = test_file.file.metadata().unwrap();
+
+        assert_eq!(6, stat.len(), "file length");
+        assert_eq!(
+            0,
+            test_file.file.stream_position().unwrap(),
+            "at start of file"
+        );
+        let mut out_buf = vec![0u8; 256];
+        let read_result = test_file.file.read(&mut out_buf);
+        assert!(read_result.is_ok());
+        assert_eq!(6, read_result.unwrap(), "file is readable");
+
+        let write_result = test_file.file.write(b"qux");
+        assert!(write_result.is_ok());
+        assert_eq!(3, write_result.unwrap(), "file is writable");
+    }
+}
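For orientation, the tests above pin down the on-disk layout these helpers generate: a native-endian u32 `used` header plus four padding bytes, then one record per metric made of a u32 JSON length, the JSON itself, space padding, and an f64 value. The Ruby sketch below mirrors that layout purely for illustration; the 8-byte alignment rule and the 8-byte header size are inferred from the test expectations (a 6-byte string gets 6 bytes of padding, two entries yield `used == 48`), not read from `RawEntry::padding_len` itself.

# Illustrative only: re-creates the .db layout exercised by testhelper.rs.
def encode_entry(json, value)
  padding = (8 - (4 + json.bytesize) % 8) % 8                  # inferred alignment rule
  [json.bytesize].pack('L') + json + (' ' * padding) + [value].pack('D')
end

def build_db(entries)
  body = entries.map { |json, value| encode_entry(json, value) }.join
  [8 + body.bytesize].pack('L') + ("\x00" * 4) + body          # 8 == inferred HEADER_SIZE
end

build_db([['foobar', 1.0], ['qux', 2.0]]).bytesize             # => 48, matching the test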
data/ext/fast_mmaped_file_rs/src/util.rs
ADDED
@@ -0,0 +1,140 @@
+use nix::errno::Errno;
+use nix::libc::c_long;
+use std::fmt::Display;
+use std::io;
+use std::mem::size_of;
+
+use crate::error::MmapError;
+use crate::exemplars::{Exemplar, EXEMPLAR_ENTRY_MAX_SIZE_BYTES};
+use crate::Result;
+
+/// Wrapper around `checked_add()` that converts failures
+/// to `MmapError::Overflow`.
+pub trait CheckedOps: Sized {
+    fn add_chk(self, rhs: Self) -> Result<Self>;
+    fn mul_chk(self, rhs: Self) -> Result<Self>;
+}
+
+impl CheckedOps for usize {
+    fn add_chk(self, rhs: Self) -> Result<Self> {
+        self.checked_add(rhs)
+            .ok_or_else(|| MmapError::overflowed(self, rhs, "adding"))
+    }
+
+    fn mul_chk(self, rhs: Self) -> Result<Self> {
+        self.checked_mul(rhs)
+            .ok_or_else(|| MmapError::overflowed(self, rhs, "multiplying"))
+    }
+}
+
+impl CheckedOps for c_long {
+    fn add_chk(self, rhs: Self) -> Result<Self> {
+        self.checked_add(rhs)
+            .ok_or_else(|| MmapError::overflowed(self, rhs, "adding"))
+    }
+
+    fn mul_chk(self, rhs: Self) -> Result<Self> {
+        self.checked_mul(rhs)
+            .ok_or_else(|| MmapError::overflowed(self, rhs, "multiplying"))
+    }
+}
+
+/// A wrapper around `TryFrom`, returning `MmapError::FailedCast` on error.
+pub fn cast_chk<T, U>(val: T, name: &str) -> Result<U>
+where
+    T: Copy + Display,
+    U: std::convert::TryFrom<T>,
+{
+    U::try_from(val).map_err(|_| MmapError::failed_cast::<T, U>(val, name))
+}
+
+/// Retrieve errno(3).
+pub fn errno() -> i32 {
+    // UNWRAP: This will always return `Some` when called from `last_os_error()`.
+    io::Error::last_os_error().raw_os_error().unwrap()
+}
+
+/// Get the error string associated with errno(3).
+/// Equivalent to strerror(3).
+pub fn strerror(errno: i32) -> &'static str {
+    Errno::from_i32(errno).desc()
+}
+
+/// Read a `u32` value from a byte slice starting from `offset`.
+#[inline]
+pub fn read_u32(buf: &[u8], offset: usize) -> Result<u32> {
+    if let Some(slice) = buf.get(offset..offset + size_of::<u32>()) {
+        // UNWRAP: We can safely unwrap the conversion from slice to array as we
+        // the source and targets are constructed here with the same length.
+        let out: &[u8; size_of::<u32>()] = slice.try_into().unwrap();
+
+        return Ok(u32::from_ne_bytes(*out));
+    }
+    Err(MmapError::out_of_bounds(offset, buf.len()))
+}
+
+/// Read an `f64` value from a byte slice starting from `offset`.
+#[inline]
+pub fn read_f64(buf: &[u8], offset: usize) -> Result<f64> {
+    if let Some(slice) = buf.get(offset..offset + size_of::<f64>()) {
+        // UNWRAP: We can safely unwrap the conversion from slice to array as we
+        // can be sure the target array has same length as the source slice.
+        let out: &[u8; size_of::<f64>()] = slice.try_into().unwrap();
+
+        return Ok(f64::from_ne_bytes(*out));
+    }
+    Err(MmapError::out_of_bounds(
+        offset + size_of::<f64>(),
+        buf.len(),
+    ))
+}
+
+pub fn read_exemplar(buf: &[u8], offset: usize) -> Result<Exemplar> {
+    if let Some(slice) = buf.get(offset..offset + EXEMPLAR_ENTRY_MAX_SIZE_BYTES) {
+        // UNWRAP: We can safely unwrap the conversion from slice to array as we
+        // can be sure the target array has same length as the source slice.
+        let out: &[u8; EXEMPLAR_ENTRY_MAX_SIZE_BYTES] = slice.try_into().expect("failed to convert slice to array");
+
+        let res: Vec<u8> = out.iter().cloned().filter(|&x| x != 0).collect();
+
+        let v: Exemplar = serde_json::from_slice(&res).expect("failed to convert string to Exemplar");
+
+        return Ok(v)
+    }
+    Err(MmapError::out_of_bounds(
+        offset + EXEMPLAR_ENTRY_MAX_SIZE_BYTES,
+        buf.len(),
+    ))
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    fn test_read_u32() {
+        let buf = 1u32.to_ne_bytes();
+
+        assert!(matches!(read_u32(&buf, 0), Ok(1)), "index ok");
+        assert!(read_u32(&buf, 10).is_err(), "index out of range");
+        assert!(
+            read_u32(&buf, 1).is_err(),
+            "index in range but end out of range"
+        );
+    }
+
+    #[test]
+    fn test_read_f64() {
+        let buf = 1.00f64.to_ne_bytes();
+
+        let ok = read_f64(&buf, 0);
+        assert!(ok.is_ok());
+        assert_eq!(ok.unwrap(), 1.00);
+
+        assert!(read_f64(&buf, 10).is_err(), "index out of range");
+        assert!(
+            read_f64(&buf, 1).is_err(),
+            "index in range but end out of range"
+        );
+    }
+}
data/lib/.DS_Store
ADDED
Binary file

data/lib/2.7/fast_mmaped_file_rs.so
ADDED
Binary file

data/lib/3.0/fast_mmaped_file_rs.so
ADDED
Binary file

data/lib/3.1/fast_mmaped_file_rs.so
ADDED
Binary file

data/lib/3.2/fast_mmaped_file_rs.so
ADDED
Binary file

data/lib/3.3/fast_mmaped_file_rs.so
ADDED
Binary file

data/lib/prometheus/.DS_Store
ADDED
Binary file
data/lib/prometheus/client/configuration.rb
ADDED
@@ -0,0 +1,24 @@
+require 'prometheus/client/registry'
+require 'prometheus/client/mmaped_value'
+require 'prometheus/client/page_size'
+require 'logger'
+require 'tmpdir'
+
+module Prometheus
+  module Client
+    class Configuration
+      attr_accessor :value_class, :multiprocess_files_dir, :initial_mmap_file_size, :logger, :pid_provider, :enable_protobuf
+
+      def initialize
+        @value_class = ::Prometheus::Client::MmapedValue
+        @initial_mmap_file_size = ::Prometheus::Client::PageSize.page_size(fallback_page_size: 4096)
+        @logger = Logger.new($stdout)
+        @pid_provider = Process.method(:pid)
+        @multiprocess_files_dir = ENV.fetch('prometheus_multiproc_dir') do
+          Dir.mktmpdir("prometheus-mmap")
+        end
+        @enable_protobuf = false
+      end
+    end
+  end
+end
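The attributes above are plain accessors, so an application can override them before any metrics are created. A minimal, hypothetical setup sketch, assuming the `Prometheus::Client.configuration` accessor that the formatters below rely on; the directory path is illustrative.

require 'prometheus/client'

config = Prometheus::Client.configuration
config.multiprocess_files_dir = '/run/prometheus-mmap'  # illustrative path
config.logger = Logger.new($stderr)
config.enable_protobuf = true                           # opt in to the protobuf format below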
data/lib/prometheus/client/counter.rb
ADDED
@@ -0,0 +1,27 @@
+# encoding: UTF-8
+
+require 'prometheus/client/metric'
+
+module Prometheus
+  module Client
+    # Counter is a metric that exposes merely a sum or tally of things.
+    class Counter < Metric
+      def type
+        :counter
+      end
+
+      def increment(labels = {}, by = 1, exemplar_name = '', exemplar_value = '')
+        raise ArgumentError, 'increment must be a non-negative number' if by < 0
+
+        label_set = label_set_for(labels)
+        synchronize { @values[label_set].increment(by, exemplar_name, exemplar_value) }
+      end
+
+      private
+
+      def default(labels)
+        value_object(type, @name, @name, labels)
+      end
+    end
+  end
+end
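A usage sketch for the exemplar-aware `increment` defined above. The metric name, labels, and exemplar values are hypothetical, and registration through `Prometheus::Client.registry` is assumed from registry.rb.

counter = Prometheus::Client::Counter.new(:http_requests_total, 'Total HTTP requests')
Prometheus::Client.registry.register(counter)

# Increment by 1 and attach an exemplar (e.g. a trace ID) to the sample.
counter.increment({ code: '200', method: 'get' }, 1, 'trace_id', 'abc123')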
data/lib/prometheus/client/formats/protobuf.rb
ADDED
@@ -0,0 +1,93 @@
+require 'prometheus/client/uses_value_type'
+require 'prometheus/client/helper/json_parser'
+require 'prometheus/client/helper/plain_file'
+require 'prometheus/client/helper/metrics_processing'
+require 'prometheus/client/helper/metrics_representation'
+
+module Prometheus
+  module Client
+    module Formats
+      # Protobuf format supports the same metric types as the text format plus
+      # native histograms. It is only available with the Rust extension.
+      module Protobuf
+        MEDIA_TYPE = 'application/vnd.google.protobuf'.freeze
+        PROTO = 'io.prometheus.client.MetricFamily'.freeze
+        ENCODING = 'delimited'.freeze
+        CONTENT_TYPE = "#{MEDIA_TYPE}; proto=#{PROTO}; encoding=#{ENCODING}".freeze
+
+        class << self
+          def marshal(registry)
+            metrics = registry.metrics.map do |metric|
+              samples = metric.values.flat_map do |label_set, value|
+                representation(metric, label_set, value)
+              end
+
+              [metric.name, { type: metric.type, help: metric.docstring, samples: samples }]
+            end
+
+            Helper::MetricsRepresentation.to_metrics(metrics)
+          end
+
+          def marshal_multiprocess(path = Prometheus::Client.configuration.multiprocess_files_dir, use_rust: true)
+            # NOTE(GiedriusS): need to ensure exemplar files go at the end because they add extra data.
+            file_list = Dir.glob(File.join(path, '*.db')).sort_by { |f| [f.include?('exemplar') ? 1 : 0, f] }
+                          .map {|f| Helper::PlainFile.new(f) }
+                          .map {|f| [f.filepath, f.multiprocess_mode.to_sym, f.type.to_sym, f.pid] }
+
+            FastMmapedFileRs.to_protobuf(file_list.to_a)
+          end
+
+          def rust_impl_available?
+            return @rust_available unless @rust_available.nil?
+
+            check_for_rust
+          end
+
+          private
+
+          def load_metrics(path)
+            metrics = {}
+            Dir.glob(File.join(path, '*.db')).sort.each do |f|
+              Helper::PlainFile.new(f).to_metrics(metrics)
+            end
+
+            metrics
+          end
+
+          def representation(metric, label_set, value)
+            labels = metric.base_labels.merge(label_set)
+
+            if metric.type == :summary
+              summary(metric.name, labels, value)
+            elsif metric.type == :histogram
+              histogram(metric.name, labels, value)
+            else
+              [[metric.name, labels, value.get]]
+            end
+          end
+
+          def summary(name, set, value)
+            rv = value.get.map do |q, v|
+              [name, set.merge(quantile: q), v]
+            end
+
+            rv << ["#{name}_sum", set, value.get.sum]
+            rv << ["#{name}_count", set, value.get.total]
+            rv
+          end
+
+          def histogram(name, set, value)
+            # |metric_name, labels, value|
+            rv = value.get.map do |q, v|
+              [name, set.merge(le: q), v]
+            end
+
+            rv << ["#{name}_sum", set, value.get.sum]
+            rv << ["#{name}_count", set, value.get.total]
+            rv
+          end
+        end
+      end
+    end
+  end
+end
data/lib/prometheus/client/formats/text.rb
ADDED
@@ -0,0 +1,85 @@
+require 'prometheus/client/uses_value_type'
+require 'prometheus/client/helper/json_parser'
+require 'prometheus/client/helper/plain_file'
+require 'prometheus/client/helper/metrics_processing'
+require 'prometheus/client/helper/metrics_representation'
+
+module Prometheus
+  module Client
+    module Formats
+      # Text format is human readable mainly used for manual inspection.
+      module Text
+        MEDIA_TYPE = 'text/plain'.freeze
+        VERSION = '0.0.4'.freeze
+        CONTENT_TYPE = "#{MEDIA_TYPE}; version=#{VERSION}".freeze
+
+        class << self
+          def marshal(registry)
+            metrics = registry.metrics.map do |metric|
+              samples = metric.values.flat_map do |label_set, value|
+                representation(metric, label_set, value)
+              end
+
+              [metric.name, { type: metric.type, help: metric.docstring, samples: samples }]
+            end
+
+            Helper::MetricsRepresentation.to_text(metrics)
+          end
+
+          def marshal_multiprocess(path = Prometheus::Client.configuration.multiprocess_files_dir)
+            file_list = Dir.glob(File.join(path, '*.db')).sort
+                          .map {|f| Helper::PlainFile.new(f) }
+                          .map {|f| [f.filepath, f.multiprocess_mode.to_sym, f.type.to_sym, f.pid] }
+
+            FastMmapedFileRs.to_metrics(file_list.to_a)
+          end
+
+          private
+
+          def load_metrics(path)
+            metrics = {}
+            Dir.glob(File.join(path, '*.db')).sort.each do |f|
+              Helper::PlainFile.new(f).to_metrics(metrics)
+            end
+
+            metrics
+          end
+
+          def representation(metric, label_set, value)
+            labels = metric.base_labels.merge(label_set)
+
+            if metric.type == :summary
+              summary(metric.name, labels, value)
+            elsif metric.type == :histogram
+              histogram(metric.name, labels, value)
+            else
+              [[metric.name, labels, value.get]]
+            end
+          end
+
+          def summary(name, set, value)
+            rv = value.get.map do |q, v|
+              [name, set.merge(quantile: q), v]
+            end
+
+            rv << ["#{name}_sum", set, value.get.sum]
+            rv << ["#{name}_count", set, value.get.total]
+            rv
+          end
+
+          def histogram(name, set, value)
+            # |metric_name, labels, value|
+            rv = value.get.map do |q, v|
+              [name, set.merge(le: q), v]
+            end
+
+            rv << [name, set.merge(le: '+Inf'), value.get.total]
+            rv << ["#{name}_sum", set, value.get.sum]
+            rv << ["#{name}_count", set, value.get.total]
+            rv
+          end
+        end
+      end
+    end
+  end
+end
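Both formatters expose the same `marshal_multiprocess` entry point, so an exporter can choose the output format from the configuration. A hypothetical sketch follows; the gem also ships its own Rack middleware (rack/exporter.rb), and this only illustrates calling the formatters directly.

format =
  if Prometheus::Client.configuration.enable_protobuf
    Prometheus::Client::Formats::Protobuf
  else
    Prometheus::Client::Formats::Text
  end

body = format.marshal_multiprocess   # reads every *.db file in multiprocess_files_dir
[200, { 'Content-Type' => format::CONTENT_TYPE }, [body]]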
data/lib/prometheus/client/gauge.rb
ADDED
@@ -0,0 +1,40 @@
+# encoding: UTF-8
+
+require 'prometheus/client/metric'
+
+module Prometheus
+  module Client
+    # A Gauge is a metric that exposes merely an instantaneous value or some
+    # snapshot thereof.
+    class Gauge < Metric
+      def initialize(name, docstring, base_labels = {}, multiprocess_mode=:all)
+        super(name, docstring, base_labels)
+        if value_class.multiprocess and ![:min, :max, :livesum, :liveall, :all].include?(multiprocess_mode)
+          raise ArgumentError, 'Invalid multiprocess mode: ' + multiprocess_mode
+        end
+        @multiprocess_mode = multiprocess_mode
+      end
+
+      def type
+        :gauge
+      end
+
+      def default(labels)
+        value_object(type, @name, @name, labels, @multiprocess_mode)
+      end
+
+      # Sets the value for the given label set
+      def set(labels, value)
+        @values[label_set_for(labels)].set(value)
+      end
+
+      def increment(labels, value)
+        @values[label_set_for(labels)].increment(value)
+      end
+
+      def decrement(labels, value)
+        @values[label_set_for(labels)].decrement(value)
+      end
+    end
+  end
+end
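A short, hypothetical sketch of the gauge API defined above; `:livesum` is one of the multiprocess modes the constructor accepts, the metric name and labels are made up, and registration via `Prometheus::Client.registry` is assumed from registry.rb.

gauge = Prometheus::Client::Gauge.new(:worker_queue_depth, 'Jobs waiting per queue', {}, :livesum)
Prometheus::Client.registry.register(gauge)

gauge.set({ queue: 'default' }, 42)
gauge.increment({ queue: 'default' }, 1)
gauge.decrement({ queue: 'default' }, 2)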