vinted-prometheus-client-mmap 1.5.0-x86_64-linux
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/README.md +5 -0
- data/ext/fast_mmaped_file_rs/Cargo.toml +40 -0
- data/ext/fast_mmaped_file_rs/README.md +52 -0
- data/ext/fast_mmaped_file_rs/build.rs +7 -0
- data/ext/fast_mmaped_file_rs/extconf.rb +28 -0
- data/ext/fast_mmaped_file_rs/src/error.rs +174 -0
- data/ext/fast_mmaped_file_rs/src/exemplars.rs +25 -0
- data/ext/fast_mmaped_file_rs/src/file_entry.rs +1252 -0
- data/ext/fast_mmaped_file_rs/src/file_info.rs +240 -0
- data/ext/fast_mmaped_file_rs/src/lib.rs +89 -0
- data/ext/fast_mmaped_file_rs/src/macros.rs +14 -0
- data/ext/fast_mmaped_file_rs/src/map.rs +519 -0
- data/ext/fast_mmaped_file_rs/src/metrics.proto +153 -0
- data/ext/fast_mmaped_file_rs/src/mmap/inner.rs +775 -0
- data/ext/fast_mmaped_file_rs/src/mmap.rs +977 -0
- data/ext/fast_mmaped_file_rs/src/raw_entry.rs +547 -0
- data/ext/fast_mmaped_file_rs/src/testhelper.rs +222 -0
- data/ext/fast_mmaped_file_rs/src/util.rs +140 -0
- data/lib/.DS_Store +0 -0
- data/lib/2.7/fast_mmaped_file_rs.so +0 -0
- data/lib/3.0/fast_mmaped_file_rs.so +0 -0
- data/lib/3.1/fast_mmaped_file_rs.so +0 -0
- data/lib/3.2/fast_mmaped_file_rs.so +0 -0
- data/lib/3.3/fast_mmaped_file_rs.so +0 -0
- data/lib/prometheus/.DS_Store +0 -0
- data/lib/prometheus/client/configuration.rb +24 -0
- data/lib/prometheus/client/counter.rb +27 -0
- data/lib/prometheus/client/formats/protobuf.rb +93 -0
- data/lib/prometheus/client/formats/text.rb +85 -0
- data/lib/prometheus/client/gauge.rb +40 -0
- data/lib/prometheus/client/helper/entry_parser.rb +132 -0
- data/lib/prometheus/client/helper/file_locker.rb +50 -0
- data/lib/prometheus/client/helper/json_parser.rb +23 -0
- data/lib/prometheus/client/helper/metrics_processing.rb +45 -0
- data/lib/prometheus/client/helper/metrics_representation.rb +51 -0
- data/lib/prometheus/client/helper/mmaped_file.rb +64 -0
- data/lib/prometheus/client/helper/plain_file.rb +29 -0
- data/lib/prometheus/client/histogram.rb +80 -0
- data/lib/prometheus/client/label_set_validator.rb +85 -0
- data/lib/prometheus/client/metric.rb +80 -0
- data/lib/prometheus/client/mmaped_dict.rb +83 -0
- data/lib/prometheus/client/mmaped_value.rb +164 -0
- data/lib/prometheus/client/page_size.rb +17 -0
- data/lib/prometheus/client/push.rb +203 -0
- data/lib/prometheus/client/rack/collector.rb +88 -0
- data/lib/prometheus/client/rack/exporter.rb +102 -0
- data/lib/prometheus/client/registry.rb +65 -0
- data/lib/prometheus/client/simple_value.rb +31 -0
- data/lib/prometheus/client/summary.rb +69 -0
- data/lib/prometheus/client/support/puma.rb +44 -0
- data/lib/prometheus/client/support/unicorn.rb +35 -0
- data/lib/prometheus/client/uses_value_type.rb +20 -0
- data/lib/prometheus/client/version.rb +5 -0
- data/lib/prometheus/client.rb +58 -0
- data/lib/prometheus.rb +3 -0
- metadata +210 -0
@@ -0,0 +1,1252 @@
|
|
1
|
+
use core::panic;
|
2
|
+
use magnus::Symbol;
|
3
|
+
use serde::Deserialize;
|
4
|
+
use serde_json::value::RawValue;
|
5
|
+
use smallvec::SmallVec;
|
6
|
+
use std::fmt::Write;
|
7
|
+
use std::str;
|
8
|
+
|
9
|
+
use crate::error::{MmapError, RubyError};
|
10
|
+
use crate::exemplars::Exemplar;
|
11
|
+
use crate::file_info::FileInfo;
|
12
|
+
use crate::raw_entry::RawEntry;
|
13
|
+
use crate::Result;
|
14
|
+
use crate::{SYM_GAUGE, SYM_LIVESUM, SYM_MAX, SYM_MIN};
|
15
|
+
use std::io::Cursor;
|
16
|
+
use varint_rs::VarintWriter;
|
17
|
+
|
18
|
+
/// Generated protobuf bindings for the Prometheus client data model
/// (`io.prometheus.client` package). The code is produced at build time
/// (see `build.rs` / `metrics.proto`) and pulled in from `OUT_DIR`.
pub mod io {
    pub mod prometheus {
        pub mod client {
            // Splice in the build-script-generated Rust types for metrics.proto.
            include!(concat!(env!("OUT_DIR"), "/io.prometheus.client.rs"));
        }
    }
}
|
25
|
+
|
26
|
+
/// A metrics entry extracted from a `*.db` file.
#[derive(Clone, Debug)]
pub struct FileEntry {
    /// The identifying payload: the raw JSON string and, when significant,
    /// the originating pid. Used as the key when merging entries.
    pub data: EntryData,
    /// Type/mode/value metadata for this entry (see `EntryMetadata`).
    pub meta: EntryMetadata,
}
|
32
|
+
|
33
|
+
/// String slices pointing to the fields of a borrowed `Entry`'s JSON data.
///
/// Deserialized zero-copy from the entry's JSON (a sequence of
/// family name, metric name, label names, label values).
#[derive(Deserialize, Debug, Clone)]
pub struct MetricText<'a> {
    /// Metric family name, shared by all series of the family.
    pub family_name: &'a str,
    /// Concrete metric (series) name, e.g. with `_sum`/`_count` suffixes.
    pub metric_name: &'a str,
    /// Label names; must pair 1:1 with `values` to be considered valid.
    pub labels: SmallVec<[&'a str; 4]>,
    /// Label values kept as raw JSON so quoting/number forms are preserved.
    #[serde(borrow)]
    pub values: SmallVec<[&'a RawValue; 4]>,
}
|
42
|
+
|
43
|
+
/// The primary data payload for a `FileEntry`, the JSON string and the
/// associated pid, if significant. Used as the key for `EntryMap`.
#[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Clone, Debug)]
pub struct EntryData {
    /// Owned copy of the entry's JSON string.
    pub json: String,
    /// Originating process id; `None` when pid is not significant
    /// for the metric's aggregation mode.
    pub pid: Option<String>,
}
|
50
|
+
|
51
|
+
impl<'a> PartialEq<BorrowedData<'a>> for EntryData {
    /// An owned `EntryData` matches a `BorrowedData` when both the JSON
    /// payload and the optional pid agree, enabling map lookups without
    /// allocating an owned key first.
    fn eq(&self, other: &BorrowedData) -> bool {
        let pid_matches = self.pid.as_deref() == other.pid;
        let json_matches = self.json == other.json;
        pid_matches && json_matches
    }
}
|
56
|
+
|
57
|
+
impl<'a> TryFrom<BorrowedData<'a>> for EntryData {
|
58
|
+
type Error = MmapError;
|
59
|
+
|
60
|
+
fn try_from(borrowed: BorrowedData) -> Result<Self> {
|
61
|
+
let mut json = String::new();
|
62
|
+
if json.try_reserve_exact(borrowed.json.len()).is_err() {
|
63
|
+
return Err(MmapError::OutOfMemory(borrowed.json.len()));
|
64
|
+
}
|
65
|
+
json.push_str(borrowed.json);
|
66
|
+
|
67
|
+
Ok(Self {
|
68
|
+
json,
|
69
|
+
// Don't bother checking for allocation failure, typically ~10 bytes
|
70
|
+
pid: borrowed.pid.map(|p| p.to_string()),
|
71
|
+
})
|
72
|
+
}
|
73
|
+
}
|
74
|
+
|
75
|
+
/// A borrowed copy of the JSON string and pid for a `FileEntry`. We use this
/// to check if a given string/pid combination is present in the `EntryMap`,
/// copying them to owned values only when needed.
#[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Clone, Debug)]
pub struct BorrowedData<'a> {
    /// JSON slice borrowed from the raw mmap entry.
    pub json: &'a str,
    /// Pid borrowed from the owning `FileInfo`; `None` when not significant.
    pub pid: Option<&'a str>,
}
|
83
|
+
|
84
|
+
impl<'a> BorrowedData<'a> {
|
85
|
+
pub fn new(
|
86
|
+
raw_entry: &'a RawEntry,
|
87
|
+
file_info: &'a FileInfo,
|
88
|
+
pid_significant: bool,
|
89
|
+
) -> Result<Self> {
|
90
|
+
let json = str::from_utf8(raw_entry.json())
|
91
|
+
.map_err(|e| MmapError::Encoding(format!("invalid UTF-8 in entry JSON: {e}")))?;
|
92
|
+
|
93
|
+
let pid = if pid_significant {
|
94
|
+
Some(file_info.pid.as_str())
|
95
|
+
} else {
|
96
|
+
None
|
97
|
+
};
|
98
|
+
|
99
|
+
Ok(Self { json, pid })
|
100
|
+
}
|
101
|
+
}
|
102
|
+
|
103
|
+
/// The metadata associated with a `FileEntry`. The value in `EntryMap`.
#[derive(Clone, Debug)]
pub struct EntryMetadata {
    /// Ruby symbol for the multiprocess aggregation mode
    /// (compared against `SYM_MIN`/`SYM_MAX`/`SYM_LIVESUM` during merges).
    pub multiprocess_mode: Symbol,
    /// Ruby symbol naming the metric type (e.g. gauge, exemplar).
    pub type_: Symbol,
    /// Numeric sample value; `None` for exemplar entries.
    pub value: Option<f64>,
    /// Exemplar payload; `Some` only for exemplar entries.
    pub ex: Option<Exemplar>,
}
|
111
|
+
|
112
|
+
impl EntryMetadata {
|
113
|
+
/// Construct a new `FileEntry`, copying the JSON string from the `RawEntry`
|
114
|
+
/// into an internal buffer.
|
115
|
+
pub fn new(mmap_entry: &RawEntry, file: &FileInfo) -> Result<Self> {
|
116
|
+
if file.type_.to_string() == "exemplar" {
|
117
|
+
let ex = mmap_entry.exemplar();
|
118
|
+
|
119
|
+
|
120
|
+
|
121
|
+
return Ok(EntryMetadata {
|
122
|
+
multiprocess_mode: file.multiprocess_mode,
|
123
|
+
type_: file.type_,
|
124
|
+
value: None,
|
125
|
+
ex: Some(ex),
|
126
|
+
})
|
127
|
+
}
|
128
|
+
|
129
|
+
let value = mmap_entry.value();
|
130
|
+
|
131
|
+
Ok(EntryMetadata {
|
132
|
+
multiprocess_mode: file.multiprocess_mode,
|
133
|
+
type_: file.type_,
|
134
|
+
value: Some(value),
|
135
|
+
ex: None,
|
136
|
+
})
|
137
|
+
}
|
138
|
+
|
139
|
+
/// Combine values with another `EntryMetadata`.
|
140
|
+
pub fn merge(&mut self, other: &Self) {
|
141
|
+
if other.ex.is_some() {
|
142
|
+
let otherex = other.ex.clone().unwrap();
|
143
|
+
|
144
|
+
if self.ex.is_some() {
|
145
|
+
let selfex = self.ex.clone().unwrap();
|
146
|
+
|
147
|
+
if selfex.timestamp < otherex.timestamp {
|
148
|
+
self.ex = other.ex.clone();
|
149
|
+
}
|
150
|
+
} else {
|
151
|
+
self.ex = other.ex.clone();
|
152
|
+
}
|
153
|
+
}
|
154
|
+
if other.value.is_some() {
|
155
|
+
if self.value.is_none() {
|
156
|
+
self.value = other.value;
|
157
|
+
} else {
|
158
|
+
let other_value = other.value.unwrap();
|
159
|
+
let self_value = self.value.unwrap();
|
160
|
+
|
161
|
+
if self.type_ == SYM_GAUGE {
|
162
|
+
match self.multiprocess_mode {
|
163
|
+
s if s == SYM_MIN => self.value = Some(self_value.min(other_value)),
|
164
|
+
s if s == SYM_MAX => self.value = Some(self_value.max(other_value)),
|
165
|
+
s if s == SYM_LIVESUM => self.value = Some(self_value + other_value),
|
166
|
+
_ => self.value = Some(other_value),
|
167
|
+
}
|
168
|
+
} else {
|
169
|
+
self.value = Some(self_value + other_value);
|
170
|
+
}
|
171
|
+
}
|
172
|
+
}
|
173
|
+
|
174
|
+
}
|
175
|
+
|
176
|
+
/// Validate if pid is significant for metric.
|
177
|
+
pub fn is_pid_significant(&self) -> bool {
|
178
|
+
let mp = self.multiprocess_mode;
|
179
|
+
|
180
|
+
self.type_ == SYM_GAUGE && !(mp == SYM_MIN || mp == SYM_MAX || mp == SYM_LIVESUM)
|
181
|
+
}
|
182
|
+
}
|
183
|
+
|
184
|
+
use crate::io::prometheus::client::MetricType::{Counter, Gauge, Histogram, Summary};
|
185
|
+
use itertools::Itertools;
|
186
|
+
use prost::Message;
|
187
|
+
use std::collections::hash_map::DefaultHasher;
|
188
|
+
use std::collections::HashMap;
|
189
|
+
use std::hash::Hash;
|
190
|
+
use std::hash::Hasher;
|
191
|
+
|
192
|
+
use std::io::Write as OtherWrite;
|
193
|
+
|
194
|
+
/// Convert an internal `Exemplar` into its protobuf wire representation.
fn exemplar_to_proto(e: &Exemplar) -> io::prometheus::client::Exemplar {
    // The divisor implies `timestamp` is in nanoseconds; split into whole
    // seconds and the nanosecond remainder for `prost_types::Timestamp`.
    // NOTE(review): assumes a non-negative timestamp — `/` and `%` truncate
    // toward zero, so a negative value would yield a non-normalized Timestamp.
    let seconds = e.timestamp / (1000 * 1000 * 1000);
    let nanos = e.timestamp % (1000 * 1000 * 1000);

    io::prometheus::client::Exemplar {
        // Exactly one label pair is carried per exemplar.
        label: vec![io::prometheus::client::LabelPair {
            name: Some(e.label_name.clone()),
            value: Some(e.label_value.clone()),
        }],
        value: Some(e.value),
        timestamp: Some(prost_types::Timestamp {
            seconds: seconds as i64,
            nanos: nanos as i32,
        }),
    }
}
|
210
|
+
|
211
|
+
impl FileEntry {
|
212
|
+
pub fn trim_quotes(s: &str) -> String {
|
213
|
+
let mut chars = s.chars();
|
214
|
+
|
215
|
+
if s.starts_with('"') {
|
216
|
+
chars.next();
|
217
|
+
}
|
218
|
+
if s.ends_with('"') {
|
219
|
+
chars.next_back();
|
220
|
+
}
|
221
|
+
|
222
|
+
chars.as_str().to_string()
|
223
|
+
}
|
224
|
+
|
225
|
+
    /// Encode `entries` into the Prometheus protobuf exposition format: a
    /// stream of varint-length-delimited `MetricFamily` messages, returned
    /// as the raw bytes wrapped in a Rust `String` (not necessarily valid
    /// UTF-8 — see the note at the end).
    ///
    /// Entries sharing an identity hash (label pairs + metric type) are
    /// merged into a single `Metric`; histogram buckets and summary
    /// quantiles/totals are accumulated across entries.
    ///
    /// NOTE(review): JSON parse failures panic via `expect` rather than
    /// returning `Err` (see TODO below).
    pub fn entries_to_protobuf(entries: Vec<FileEntry>) -> Result<String> {
        let mut buffer: Cursor<Vec<u8>> = Cursor::new(Vec::new());
        // Merged metrics keyed by identity hash, with parallel maps recording
        // each metric's type string and final (suffix-stripped) name.
        let mut mtrcs: HashMap<u64, io::prometheus::client::Metric> = HashMap::new();
        let mut metric_types = HashMap::new();
        let mut metric_names = HashMap::new();

        entries
            .iter()
            // TODO: Don't just unwrap. Handle the error gracefully.
            .map(|v| {
                (
                    v,
                    serde_json::from_str::<MetricText>(&v.data.json)
                        .expect("cannot parse json entry"),
                    v.meta.type_.name().expect("getting name").into_owned(),
                )
            })
            // Label names and values must pair up 1:1; drop malformed entries.
            .filter(|v| v.1.labels.len() == v.1.values.len())
            .group_by(|v| v.1.family_name)
            .into_iter()
            .for_each(|(_, group)| {
                // NOTE(GiedriusS): different dynamic labels fall under the same
                // metric group.

                'outer: for gr in group {
                    let metric_type = gr.2;

                    // (name, value) label pairs with surrounding quotes removed.
                    let lbls =
                        gr.1.labels
                            .iter()
                            .map(|l| Self::trim_quotes(l))
                            .zip(gr.1.values.iter().map(|v| Self::trim_quotes(v.get())));

                    // Start from an empty Metric; exactly one type-specific
                    // field is filled in by the match below.
                    let mut m = io::prometheus::client::Metric {
                        label: lbls
                            .clone()
                            .map(|l| io::prometheus::client::LabelPair {
                                name: Some(l.0),
                                value: Some(l.1.to_string()),
                            })
                            .collect::<Vec<io::prometheus::client::LabelPair>>(),
                        gauge: None,
                        counter: None,
                        summary: None,
                        untyped: None,
                        histogram: None,
                        timestamp_ms: None,
                    };

                    match metric_type.as_str() {
                        "counter" => {
                            let mut hasher = DefaultHasher::new();

                            // Iterate over the tuples and hash their elements
                            for (a, b) in lbls {
                                a.hash(&mut hasher);
                                b.hash(&mut hasher);
                            }
                            "counter".hash(&mut hasher);

                            // Get the final u64 hash value
                            let hash_value = hasher.finish();

                            m.counter = Some(io::prometheus::client::Counter {
                                value: gr.0.meta.value,
                                created_timestamp: None,
                                exemplar: None,
                            });

                            // Attach a merged exemplar, when present.
                            if gr.0.meta.ex.is_some() {
                                m.counter.as_mut().unwrap().exemplar =
                                    Some(exemplar_to_proto(gr.0.meta.ex.as_ref().unwrap()));
                            }

                            mtrcs.insert(hash_value, m);
                            metric_types.insert(hash_value, "counter");
                            metric_names.insert(hash_value, gr.1.metric_name);
                        }
                        "gauge" => {
                            let mut hasher = DefaultHasher::new();

                            // Iterate over the tuples and hash their elements
                            for (a, b) in lbls {
                                a.hash(&mut hasher);
                                b.hash(&mut hasher);
                            }
                            "gauge".hash(&mut hasher);

                            let hash_value = hasher.finish();

                            m.gauge = Some(io::prometheus::client::Gauge {
                                value: gr.0.meta.value,
                            });
                            mtrcs.insert(hash_value, m);
                            metric_types.insert(hash_value, "gauge");
                            metric_names.insert(hash_value, gr.1.metric_name);
                        }
                        "histogram" => {
                            // The hash excludes the `le` label so all buckets
                            // of one series land on the same Metric entry.
                            let mut hasher = DefaultHasher::new();

                            let mut le: Option<f64> = None;

                            // Iterate over the tuples and hash their elements
                            for (a, b) in lbls {
                                if a != "le" {
                                    a.hash(&mut hasher);
                                    b.hash(&mut hasher);
                                }

                                // Safe to ignore +Inf bound.
                                if a == "le" {
                                    if b == "+Inf" {
                                        continue 'outer;
                                    }
                                    let leparsed = b.parse::<f64>();
                                    match leparsed {
                                        Ok(p) => le = Some(p),
                                        Err(e) => panic!("failed to parse {} due to {}", b, e),
                                    }
                                }
                            }
                            "histogram".hash(&mut hasher);

                            let hash_value = hasher.finish();

                            match mtrcs.get_mut(&hash_value) {
                                // Existing series: accumulate into the matching bucket.
                                Some(v) => {
                                    let hs =
                                        v.histogram.as_mut().expect("getting mutable histogram");

                                    for bucket in &mut hs.bucket {
                                        if bucket.upper_bound != le {
                                            continue;
                                        }

                                        let mut curf: f64 =
                                            bucket.cumulative_count_float.unwrap_or_default();
                                        curf += gr.0.meta.value.unwrap();

                                        bucket.cumulative_count_float = Some(curf);

                                        if gr.0.meta.ex.is_some() {
                                            bucket.exemplar =
                                                Some(exemplar_to_proto(gr.0.meta.ex.as_ref().unwrap()));
                                        }
                                    }
                                    // NOTE(review): an `le` bound not already present in
                                    // `hs.bucket` is silently dropped here — confirm every
                                    // bound appears in the first entry seen for the series.
                                }
                                None => {
                                    // First sample for this series: strip histogram
                                    // suffixes so the family carries the base name.
                                    let mut final_metric_name = gr.1.metric_name;

                                    if let Some(stripped) =
                                        final_metric_name.strip_suffix("_bucket")
                                    {
                                        final_metric_name = stripped;
                                    }
                                    if let Some(stripped) = final_metric_name.strip_suffix("_sum") {
                                        final_metric_name = stripped;
                                    }
                                    if let Some(stripped) = final_metric_name.strip_suffix("_count")
                                    {
                                        final_metric_name = stripped;
                                    }

                                    let mut buckets = vec![io::prometheus::client::Bucket {
                                        cumulative_count: None,
                                        cumulative_count_float: gr.0.meta.value,
                                        upper_bound: Some(
                                            le.expect(
                                                &format!("got no LE for {}", gr.1.metric_name)
                                                    .to_string(),
                                            ),
                                        ),
                                        exemplar: None,
                                    }];

                                    if gr.0.meta.ex.is_some() {
                                        buckets[0].exemplar =
                                            Some(exemplar_to_proto(gr.0.meta.ex.as_ref().unwrap()));
                                    }
                                    // `le` lives in the Bucket message, not as a label.
                                    m.label = m
                                        .label
                                        .into_iter()
                                        .filter(|l| l.name != Some("le".to_string()))
                                        .collect_vec();
                                    // Create a new metric.
                                    m.histogram = Some(io::prometheus::client::Histogram {
                                        // All native histogram fields.
                                        sample_count: None,
                                        sample_count_float: None,
                                        sample_sum: None,
                                        created_timestamp: None,
                                        schema: None,
                                        zero_count: None,
                                        zero_count_float: None,
                                        zero_threshold: None,
                                        negative_count: vec![],
                                        negative_delta: vec![],
                                        negative_span: vec![],
                                        positive_count: vec![],
                                        positive_delta: vec![],
                                        positive_span: vec![],
                                        // All classic histogram fields.
                                        bucket: buckets,
                                    });
                                    mtrcs.insert(hash_value, m);
                                    metric_types.insert(hash_value, "histogram");
                                    metric_names.insert(hash_value, final_metric_name);
                                }
                            }
                        }
                        "summary" => {
                            // The hash excludes the `quantile` label so all
                            // quantiles of one series share a Metric entry.
                            let mut hasher = DefaultHasher::new();

                            let mut quantile: Option<f64> = None;

                            // Iterate over the tuples and hash their elements
                            for (a, b) in lbls {
                                if a != "quantile" {
                                    a.hash(&mut hasher);
                                    b.hash(&mut hasher);
                                }
                                if a == "quantile" {
                                    let quantileparsed = b.parse::<f64>();
                                    match quantileparsed {
                                        Ok(p) => quantile = Some(p),
                                        Err(e) => {
                                            panic!("failed to parse quantile {} due to {}", b, e)
                                        }
                                    }
                                }
                            }
                            "summary".hash(&mut hasher);
                            let hash_value = hasher.finish();

                            match mtrcs.get_mut(&hash_value) {
                                Some(v) => {
                                    // Go through and edit buckets.
                                    let smry = v.summary.as_mut().expect(
                                        &format!(
                                            "getting mutable summary for {}",
                                            gr.1.metric_name
                                        )
                                        .to_string(),
                                    );

                                    // `_count`/`_sum` samples accumulate into the summary
                                    // totals; anything else is a quantile sample.
                                    if gr.1.metric_name.ends_with("_count") {
                                        let samplecount = smry.sample_count.unwrap_or_default();
                                        smry.sample_count =
                                            Some((gr.0.meta.value.unwrap() as u64) + samplecount);
                                    } else if gr.1.metric_name.ends_with("_sum") {
                                        let samplesum: f64 = smry.sample_sum.unwrap_or_default();
                                        smry.sample_sum = Some(gr.0.meta.value.unwrap() + samplesum);
                                    } else {
                                        let mut found_quantile = false;
                                        for qntl in &mut smry.quantile {
                                            if qntl.quantile != quantile {
                                                continue;
                                            }

                                            // NOTE(review): this adds the sample value to the
                                            // *quantile* field rather than to `qntl.value` —
                                            // looks like a bug; confirm intended behavior.
                                            let mut curq: f64 = qntl.quantile.unwrap_or_default();
                                            curq += gr.0.meta.value.unwrap();

                                            qntl.quantile = Some(curq);
                                            found_quantile = true;
                                        }

                                        if !found_quantile {
                                            smry.quantile.push(io::prometheus::client::Quantile {
                                                quantile: quantile,
                                                value: gr.0.meta.value,
                                            });
                                        }
                                    }
                                }
                                None => {
                                    // `quantile` lives in the Quantile message, not as a label.
                                    m.label = m
                                        .label
                                        .into_iter()
                                        .filter(|l| l.name != Some("quantile".to_string()))
                                        .collect_vec();

                                    let mut final_metric_name = gr.1.metric_name;
                                    // If quantile then add to quantiles.
                                    // if ends with _count then add it to count.
                                    // If ends with _sum then add it to sum.
                                    if gr.1.metric_name.ends_with("_count") {
                                        final_metric_name =
                                            gr.1.metric_name.strip_suffix("_count").unwrap();
                                        m.summary = Some(io::prometheus::client::Summary {
                                            quantile: vec![],
                                            sample_count: Some(gr.0.meta.value.unwrap() as u64),
                                            sample_sum: None,
                                            created_timestamp: None,
                                        });
                                    } else if gr.1.metric_name.ends_with("_sum") {
                                        final_metric_name =
                                            gr.1.metric_name.strip_suffix("_sum").unwrap();
                                        m.summary = Some(io::prometheus::client::Summary {
                                            quantile: vec![],
                                            sample_sum: Some(gr.0.meta.value.unwrap()),
                                            sample_count: None,
                                            created_timestamp: None,
                                        });
                                    } else {
                                        let quantiles = vec![io::prometheus::client::Quantile {
                                            quantile: quantile,
                                            value: gr.0.meta.value,
                                        }];
                                        m.summary = Some(io::prometheus::client::Summary {
                                            quantile: quantiles,
                                            sample_count: None,
                                            sample_sum: None,
                                            created_timestamp: None,
                                        });
                                    }

                                    mtrcs.insert(hash_value, m);
                                    metric_types.insert(hash_value, "summary");
                                    metric_names.insert(hash_value, final_metric_name);
                                }
                            }
                        }
                        "exemplar" => {
                            // Exemplars are handled later on.
                        }
                        mtype => {
                            panic!("unhandled metric type {}", mtype)
                        }
                    }
                }
            });

        // Serialize each merged MetricFamily, prefixed with its length as a
        // u32 varint (the length-delimited protobuf streaming format).
        mtrcs.iter().for_each(|mtrc| {
            let metric_name = metric_names.get(mtrc.0).expect("getting metric name");
            let metric_type = metric_types.get(mtrc.0).expect("getting metric type");

            let protobuf_mf = io::prometheus::client::MetricFamily {
                name: Some(metric_name.to_string()),
                help: Some("Multiprocess metric".to_string()),
                r#type: match metric_type.to_string().as_str() {
                    "counter" => Some(Counter.into()),
                    "gauge" => Some(Gauge.into()),
                    "histogram" => Some(Histogram.into()),
                    "summary" => Some(Summary.into()),
                    mtype => panic!("unhandled metric type {}", mtype),
                },
                metric: vec![mtrc.1.clone()],
            };

            let encoded_mf = protobuf_mf.encode_to_vec();

            buffer
                .write_u32_varint(
                    encoded_mf
                        .len()
                        .try_into()
                        .expect("failed to encode metricfamily"),
                )
                .unwrap();
            buffer
                .write_all(&encoded_mf)
                .expect("failed to write output");
        });

        // NOTE: Rust strings are bytes encoded in UTF-8. Ruby doesn't have such
        // invariant. So, let's convert those bytes to a string since everything ends
        // up as a string in Ruby.
        unsafe { Ok(str::from_utf8_unchecked(buffer.get_ref()).to_string()) }
    }
|
594
|
+
|
595
|
+
|
596
|
+
|
597
|
+
    /// Convert the sorted entries into a String in Prometheus metrics format.
    pub fn entries_to_string(entries: Vec<FileEntry>) -> Result<String> {
        // We guesstimate that lines are ~100 bytes long, preallocate the string to
        // roughly that size.
        let mut out = String::new();
        out.try_reserve(entries.len() * 128)
            .map_err(|_| MmapError::OutOfMemory(entries.len() * 128))?;

        // A HELP/TYPE header is emitted whenever the family name changes from
        // the previous entry (entries are expected pre-sorted by family).
        let mut prev_name: Option<String> = None;

        let entry_count = entries.len();
        let mut processed_count = 0;

        for entry in entries {
            let metrics_data = match serde_json::from_str::<MetricText>(&entry.data.json) {
                Ok(m) => {
                    // Label names and values must pair 1:1; skip malformed entries.
                    if m.labels.len() != m.values.len() {
                        continue;
                    }
                    m
                }
                // We don't exit the function here so the total number of invalid
                // entries can be calculated below.
                Err(_) => continue,
            };

            match prev_name.as_ref() {
                Some(p) if p == metrics_data.family_name => {}
                _ => {
                    entry.append_header(metrics_data.family_name, &mut out);
                    prev_name = Some(metrics_data.family_name.to_owned());
                }
            }

            entry.append_entry(metrics_data, &mut out)?;

            // NOTE(review): `meta.value` is `None` for exemplar entries, so this
            // `unwrap()` panics if such an entry reaches the text exposition
            // path — confirm exemplars are filtered out before calling this.
            writeln!(&mut out, " {}", entry.meta.value.unwrap())
                .map_err(|e| MmapError::Other(format!("Failed to append to output: {e}")))?;

            processed_count += 1;
        }

        // Any skipped (unparseable/malformed) entry surfaces as an error here.
        if processed_count != entry_count {
            return Err(MmapError::legacy(
                format!("Processed entries {processed_count} != map entries {entry_count}"),
                RubyError::Runtime,
            ));
        }

        Ok(out)
    }
|
648
|
+
|
649
|
+
fn append_header(&self, family_name: &str, out: &mut String) {
|
650
|
+
out.push_str("# HELP ");
|
651
|
+
out.push_str(family_name);
|
652
|
+
out.push_str(" Multiprocess metric\n");
|
653
|
+
|
654
|
+
out.push_str("# TYPE ");
|
655
|
+
out.push_str(family_name);
|
656
|
+
out.push(' ');
|
657
|
+
|
658
|
+
out.push_str(&self.meta.type_.name().expect("name was invalid UTF-8"));
|
659
|
+
out.push('\n');
|
660
|
+
}
|
661
|
+
|
662
|
+
fn append_entry(&self, json_data: MetricText, out: &mut String) -> Result<()> {
|
663
|
+
out.push_str(json_data.metric_name);
|
664
|
+
|
665
|
+
if json_data.labels.is_empty() {
|
666
|
+
if let Some(pid) = self.data.pid.as_ref() {
|
667
|
+
out.push_str("{pid=\"");
|
668
|
+
out.push_str(pid);
|
669
|
+
out.push_str("\"}");
|
670
|
+
}
|
671
|
+
|
672
|
+
return Ok(());
|
673
|
+
}
|
674
|
+
|
675
|
+
out.push('{');
|
676
|
+
|
677
|
+
let it = json_data.labels.iter().zip(json_data.values.iter());
|
678
|
+
|
679
|
+
for (i, (&key, val)) in it.enumerate() {
|
680
|
+
out.push_str(key);
|
681
|
+
out.push('=');
|
682
|
+
|
683
|
+
match val.get() {
|
684
|
+
"null" => out.push_str("\"\""),
|
685
|
+
s if s.starts_with('"') => out.push_str(s),
|
686
|
+
s => {
|
687
|
+
// Quote numeric values.
|
688
|
+
out.push('"');
|
689
|
+
out.push_str(s);
|
690
|
+
out.push('"');
|
691
|
+
}
|
692
|
+
}
|
693
|
+
|
694
|
+
if i < json_data.labels.len() - 1 {
|
695
|
+
out.push(',');
|
696
|
+
}
|
697
|
+
}
|
698
|
+
|
699
|
+
if let Some(pid) = self.data.pid.as_ref() {
|
700
|
+
out.push_str(",pid=\"");
|
701
|
+
out.push_str(pid);
|
702
|
+
out.push('"');
|
703
|
+
}
|
704
|
+
|
705
|
+
out.push('}');
|
706
|
+
|
707
|
+
Ok(())
|
708
|
+
}
|
709
|
+
}
|
710
|
+
|
711
|
+
#[cfg(test)]
|
712
|
+
mod test {
|
713
|
+
use bstr::BString;
|
714
|
+
use indoc::indoc;
|
715
|
+
|
716
|
+
use super::*;
|
717
|
+
use crate::file_info::FileInfo;
|
718
|
+
use crate::raw_entry::RawEntry;
|
719
|
+
use crate::testhelper::{TestEntry, TestFile};
|
720
|
+
|
721
|
+
#[test]
|
722
|
+
fn test_trim_quotes() {
|
723
|
+
assert_eq!("foo", FileEntry::trim_quotes("foo"));
|
724
|
+
assert_eq!("foo", FileEntry::trim_quotes("\"foo\""));
|
725
|
+
}
|
726
|
+
|
727
|
+
#[test]
|
728
|
+
fn test_entries_to_string() {
|
729
|
+
struct TestCase {
|
730
|
+
name: &'static str,
|
731
|
+
multiprocess_mode: &'static str,
|
732
|
+
json: &'static [&'static str],
|
733
|
+
values: &'static [f64],
|
734
|
+
pids: &'static [&'static str],
|
735
|
+
expected_out: Option<&'static str>,
|
736
|
+
expected_err: Option<MmapError>,
|
737
|
+
}
|
738
|
+
|
739
|
+
let _cleanup = unsafe { magnus::embed::init() };
|
740
|
+
let ruby = magnus::Ruby::get().unwrap();
|
741
|
+
crate::init(&ruby).unwrap();
|
742
|
+
|
743
|
+
let tc = vec![
|
744
|
+
TestCase {
|
745
|
+
name: "one metric, pid significant",
|
746
|
+
multiprocess_mode: "all",
|
747
|
+
json: &[r#"["family","name",["label_a","label_b"],["value_a","value_b"]]"#],
|
748
|
+
values: &[1.0],
|
749
|
+
pids: &["worker-1"],
|
750
|
+
expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
|
751
|
+
# TYPE family gauge
|
752
|
+
name{label_a="value_a",label_b="value_b",pid="worker-1"} 1
|
753
|
+
"##}),
|
754
|
+
expected_err: None,
|
755
|
+
},
|
756
|
+
TestCase {
|
757
|
+
name: "one metric, no pid",
|
758
|
+
multiprocess_mode: "min",
|
759
|
+
json: &[r#"["family","name",["label_a","label_b"],["value_a","value_b"]]"#],
|
760
|
+
values: &[1.0],
|
761
|
+
pids: &["worker-1"],
|
762
|
+
expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
|
763
|
+
# TYPE family gauge
|
764
|
+
name{label_a="value_a",label_b="value_b"} 1
|
765
|
+
"##}),
|
766
|
+
expected_err: None,
|
767
|
+
},
|
768
|
+
TestCase {
|
769
|
+
name: "many labels",
|
770
|
+
multiprocess_mode: "min",
|
771
|
+
json: &[
|
772
|
+
r#"["family","name",["label_a","label_b","label_c","label_d","label_e"],["value_a","value_b","value_c","value_d","value_e"]]"#,
|
773
|
+
],
|
774
|
+
values: &[1.0],
|
775
|
+
pids: &["worker-1"],
|
776
|
+
expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
|
777
|
+
# TYPE family gauge
|
778
|
+
name{label_a="value_a",label_b="value_b",label_c="value_c",label_d="value_d",label_e="value_e"} 1
|
779
|
+
"##}),
|
780
|
+
expected_err: None,
|
781
|
+
},
|
782
|
+
TestCase {
|
783
|
+
name: "floating point shown",
|
784
|
+
multiprocess_mode: "min",
|
785
|
+
json: &[r#"["family","name",["label_a","label_b"],["value_a","value_b"]]"#],
|
786
|
+
values: &[1.5],
|
787
|
+
pids: &["worker-1"],
|
788
|
+
expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
|
789
|
+
# TYPE family gauge
|
790
|
+
name{label_a="value_a",label_b="value_b"} 1.5
|
791
|
+
"##}),
|
792
|
+
expected_err: None,
|
793
|
+
},
|
794
|
+
TestCase {
|
795
|
+
name: "numeric value",
|
796
|
+
multiprocess_mode: "min",
|
797
|
+
json: &[
|
798
|
+
r#"["family","name",["label_a","label_b","label_c"],["value_a",403,-0.2E5]]"#,
|
799
|
+
],
|
800
|
+
values: &[1.5],
|
801
|
+
pids: &["worker-1"],
|
802
|
+
expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
|
803
|
+
# TYPE family gauge
|
804
|
+
name{label_a="value_a",label_b="403",label_c="-0.2E5"} 1.5
|
805
|
+
"##}),
|
806
|
+
expected_err: None,
|
807
|
+
},
|
808
|
+
TestCase {
|
809
|
+
name: "null value",
|
810
|
+
multiprocess_mode: "min",
|
811
|
+
json: &[r#"["family","name",["label_a","label_b"],["value_a",null]]"#],
|
812
|
+
values: &[1.5],
|
813
|
+
pids: &["worker-1"],
|
814
|
+
expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
|
815
|
+
# TYPE family gauge
|
816
|
+
name{label_a="value_a",label_b=""} 1.5
|
817
|
+
"##}),
|
818
|
+
expected_err: None,
|
819
|
+
},
|
820
|
+
TestCase {
|
821
|
+
name: "comma in value",
|
822
|
+
multiprocess_mode: "min",
|
823
|
+
json: &[r#"["family","name",["label_a","label_b"],["value_a","value,_b"]]"#],
|
824
|
+
values: &[1.5],
|
825
|
+
pids: &["worker-1"],
|
826
|
+
expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
|
827
|
+
# TYPE family gauge
|
828
|
+
name{label_a="value_a",label_b="value,_b"} 1.5
|
829
|
+
"##}),
|
830
|
+
expected_err: None,
|
831
|
+
},
|
832
|
+
TestCase {
|
833
|
+
name: "no labels, pid significant",
|
834
|
+
multiprocess_mode: "all",
|
835
|
+
json: &[r#"["family","name",[],[]]"#],
|
836
|
+
values: &[1.0],
|
837
|
+
pids: &["worker-1"],
|
838
|
+
expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
|
839
|
+
# TYPE family gauge
|
840
|
+
name{pid="worker-1"} 1
|
841
|
+
"##}),
|
842
|
+
expected_err: None,
|
843
|
+
},
|
844
|
+
TestCase {
|
845
|
+
name: "no labels, no pid",
|
846
|
+
multiprocess_mode: "min",
|
847
|
+
json: &[r#"["family","name",[],[]]"#],
|
848
|
+
values: &[1.0],
|
849
|
+
pids: &["worker-1"],
|
850
|
+
expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
|
851
|
+
# TYPE family gauge
|
852
|
+
name 1
|
853
|
+
"##}),
|
854
|
+
expected_err: None,
|
855
|
+
},
|
856
|
+
TestCase {
|
857
|
+
name: "two metrics, same family, pid significant",
|
858
|
+
multiprocess_mode: "all",
|
859
|
+
json: &[
|
860
|
+
r#"["family","first",["label_a","label_b"],["value_a","value_b"]]"#,
|
861
|
+
r#"["family","second",["label_a","label_b"],["value_a","value_b"]]"#,
|
862
|
+
],
|
863
|
+
values: &[1.0, 2.0],
|
864
|
+
pids: &["worker-1", "worker-1"],
|
865
|
+
expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
|
866
|
+
# TYPE family gauge
|
867
|
+
first{label_a="value_a",label_b="value_b",pid="worker-1"} 1
|
868
|
+
second{label_a="value_a",label_b="value_b",pid="worker-1"} 2
|
869
|
+
"##}),
|
870
|
+
expected_err: None,
|
871
|
+
},
|
872
|
+
TestCase {
|
873
|
+
name: "two metrics, different family, pid significant",
|
874
|
+
multiprocess_mode: "min",
|
875
|
+
json: &[
|
876
|
+
r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
|
877
|
+
r#"["second_family","second_name",["label_a","label_b"],["value_a","value_b"]]"#,
|
878
|
+
],
|
879
|
+
values: &[1.0, 2.0],
|
880
|
+
pids: &["worker-1", "worker-1"],
|
881
|
+
expected_out: Some(indoc! {r##"# HELP first_family Multiprocess metric
|
882
|
+
# TYPE first_family gauge
|
883
|
+
first_name{label_a="value_a",label_b="value_b"} 1
|
884
|
+
# HELP second_family Multiprocess metric
|
885
|
+
# TYPE second_family gauge
|
886
|
+
second_name{label_a="value_a",label_b="value_b"} 2
|
887
|
+
"##}),
|
888
|
+
expected_err: None,
|
889
|
+
},
|
890
|
+
TestCase {
|
891
|
+
name: "three metrics, two different families, pid significant",
|
892
|
+
multiprocess_mode: "all",
|
893
|
+
json: &[
|
894
|
+
r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
|
895
|
+
r#"["first_family","second_name",["label_a","label_b"],["value_a","value_b"]]"#,
|
896
|
+
r#"["second_family","second_name",["label_a","label_b"],["value_a","value_b"]]"#,
|
897
|
+
],
|
898
|
+
values: &[1.0, 2.0, 3.0],
|
899
|
+
pids: &["worker-1", "worker-1", "worker-1"],
|
900
|
+
expected_out: Some(indoc! {r##"# HELP first_family Multiprocess metric
|
901
|
+
# TYPE first_family gauge
|
902
|
+
first_name{label_a="value_a",label_b="value_b",pid="worker-1"} 1
|
903
|
+
second_name{label_a="value_a",label_b="value_b",pid="worker-1"} 2
|
904
|
+
# HELP second_family Multiprocess metric
|
905
|
+
# TYPE second_family gauge
|
906
|
+
second_name{label_a="value_a",label_b="value_b",pid="worker-1"} 3
|
907
|
+
"##}),
|
908
|
+
expected_err: None,
|
909
|
+
},
|
910
|
+
TestCase {
|
911
|
+
name: "same metrics, pid significant, separate workers",
|
912
|
+
multiprocess_mode: "all",
|
913
|
+
json: &[
|
914
|
+
r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
|
915
|
+
r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
|
916
|
+
],
|
917
|
+
values: &[1.0, 2.0],
|
918
|
+
pids: &["worker-1", "worker-2"],
|
919
|
+
expected_out: Some(indoc! {r##"# HELP first_family Multiprocess metric
|
920
|
+
# TYPE first_family gauge
|
921
|
+
first_name{label_a="value_a",label_b="value_b",pid="worker-1"} 1
|
922
|
+
first_name{label_a="value_a",label_b="value_b",pid="worker-2"} 2
|
923
|
+
"##}),
|
924
|
+
expected_err: None,
|
925
|
+
},
|
926
|
+
TestCase {
|
927
|
+
name: "same metrics, pid not significant, separate workers",
|
928
|
+
multiprocess_mode: "max",
|
929
|
+
json: &[
|
930
|
+
r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
|
931
|
+
r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
|
932
|
+
],
|
933
|
+
values: &[1.0, 2.0],
|
934
|
+
pids: &["worker-1", "worker-2"],
|
935
|
+
expected_out: Some(indoc! {r##"# HELP first_family Multiprocess metric
|
936
|
+
# TYPE first_family gauge
|
937
|
+
first_name{label_a="value_a",label_b="value_b"} 1
|
938
|
+
first_name{label_a="value_a",label_b="value_b"} 2
|
939
|
+
"##}),
|
940
|
+
expected_err: None,
|
941
|
+
},
|
942
|
+
TestCase {
|
943
|
+
name: "entry fails to parse",
|
944
|
+
multiprocess_mode: "min",
|
945
|
+
json: &[
|
946
|
+
r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
|
947
|
+
r#"[not valid"#,
|
948
|
+
],
|
949
|
+
values: &[1.0, 2.0],
|
950
|
+
pids: &["worker-1", "worker-1"],
|
951
|
+
expected_out: None,
|
952
|
+
expected_err: Some(MmapError::legacy(
|
953
|
+
"Processed entries 1 != map entries 2".to_owned(),
|
954
|
+
RubyError::Runtime,
|
955
|
+
)),
|
956
|
+
},
|
957
|
+
TestCase {
|
958
|
+
name: "too many values",
|
959
|
+
multiprocess_mode: "min",
|
960
|
+
json: &[r#"["family","name",["label_a"],["value_a","value,_b"]]"#],
|
961
|
+
values: &[1.5],
|
962
|
+
pids: &["worker-1"],
|
963
|
+
expected_out: None,
|
964
|
+
expected_err: Some(MmapError::legacy(
|
965
|
+
"Processed entries 0 != map entries 1".to_owned(),
|
966
|
+
RubyError::Runtime,
|
967
|
+
)),
|
968
|
+
},
|
969
|
+
TestCase {
|
970
|
+
name: "no values",
|
971
|
+
multiprocess_mode: "min",
|
972
|
+
json: &[r#"["family","name",["label_a"]]"#],
|
973
|
+
values: &[1.5],
|
974
|
+
pids: &["worker-1"],
|
975
|
+
expected_out: None,
|
976
|
+
expected_err: Some(MmapError::legacy(
|
977
|
+
"Processed entries 0 != map entries 1".to_owned(),
|
978
|
+
RubyError::Runtime,
|
979
|
+
)),
|
980
|
+
},
|
981
|
+
TestCase {
|
982
|
+
name: "no labels or values",
|
983
|
+
multiprocess_mode: "min",
|
984
|
+
json: &[r#"["family","name","foo"]"#],
|
985
|
+
values: &[1.5],
|
986
|
+
pids: &["worker-1"],
|
987
|
+
expected_out: None,
|
988
|
+
expected_err: Some(MmapError::legacy(
|
989
|
+
"Processed entries 0 != map entries 1".to_owned(),
|
990
|
+
RubyError::Runtime,
|
991
|
+
)),
|
992
|
+
},
|
993
|
+
TestCase {
|
994
|
+
name: "too many leading brackets",
|
995
|
+
multiprocess_mode: "min",
|
996
|
+
json: &[r#"[["family","name",["label_a","label_b"],["value_a","value_b"]]"#],
|
997
|
+
values: &[1.5],
|
998
|
+
pids: &["worker-1"],
|
999
|
+
expected_out: None,
|
1000
|
+
expected_err: Some(MmapError::legacy(
|
1001
|
+
"Processed entries 0 != map entries 1".to_owned(),
|
1002
|
+
RubyError::Runtime,
|
1003
|
+
)),
|
1004
|
+
},
|
1005
|
+
TestCase {
|
1006
|
+
name: "too many trailing brackets",
|
1007
|
+
multiprocess_mode: "min",
|
1008
|
+
json: &[r#"["family","name",["label_a","label_b"],["value_a","value_b"]]]"#],
|
1009
|
+
values: &[1.5],
|
1010
|
+
pids: &["worker-1"],
|
1011
|
+
expected_out: None,
|
1012
|
+
expected_err: Some(MmapError::legacy(
|
1013
|
+
"Processed entries 0 != map entries 1".to_owned(),
|
1014
|
+
RubyError::Runtime,
|
1015
|
+
)),
|
1016
|
+
},
|
1017
|
+
TestCase {
|
1018
|
+
name: "too many leading label brackets",
|
1019
|
+
multiprocess_mode: "min",
|
1020
|
+
json: &[r#"["family","name",[["label_a","label_b"],["value_a","value_b"]]"#],
|
1021
|
+
values: &[1.5],
|
1022
|
+
pids: &["worker-1"],
|
1023
|
+
expected_out: None,
|
1024
|
+
expected_err: Some(MmapError::legacy(
|
1025
|
+
"Processed entries 0 != map entries 1".to_owned(),
|
1026
|
+
RubyError::Runtime,
|
1027
|
+
)),
|
1028
|
+
},
|
1029
|
+
TestCase {
|
1030
|
+
name: "too many leading label brackets",
|
1031
|
+
multiprocess_mode: "min",
|
1032
|
+
json: &[r#"["family","name",[["label_a","label_b"],["value_a","value_b"]]"#],
|
1033
|
+
values: &[1.5],
|
1034
|
+
pids: &["worker-1"],
|
1035
|
+
expected_out: None,
|
1036
|
+
expected_err: Some(MmapError::legacy(
|
1037
|
+
"Processed entries 0 != map entries 1".to_owned(),
|
1038
|
+
RubyError::Runtime,
|
1039
|
+
)),
|
1040
|
+
},
|
1041
|
+
TestCase {
|
1042
|
+
name: "too many leading value brackets",
|
1043
|
+
multiprocess_mode: "min",
|
1044
|
+
json: &[r#"["family","name",["label_a","label_b"],[["value_a","value_b"]]"#],
|
1045
|
+
values: &[1.5],
|
1046
|
+
pids: &["worker-1"],
|
1047
|
+
expected_out: None,
|
1048
|
+
expected_err: Some(MmapError::legacy(
|
1049
|
+
"Processed entries 0 != map entries 1".to_owned(),
|
1050
|
+
RubyError::Runtime,
|
1051
|
+
)),
|
1052
|
+
},
|
1053
|
+
TestCase {
|
1054
|
+
name: "misplaced bracket",
|
1055
|
+
multiprocess_mode: "min",
|
1056
|
+
json: &[r#"["family","name",["label_a","label_b"],]["value_a","value_b"]]"#],
|
1057
|
+
values: &[1.5],
|
1058
|
+
pids: &["worker-1"],
|
1059
|
+
expected_out: None,
|
1060
|
+
expected_err: Some(MmapError::legacy(
|
1061
|
+
"Processed entries 0 != map entries 1".to_owned(),
|
1062
|
+
RubyError::Runtime,
|
1063
|
+
)),
|
1064
|
+
},
|
1065
|
+
TestCase {
|
1066
|
+
name: "comma in numeric",
|
1067
|
+
multiprocess_mode: "min",
|
1068
|
+
json: &[r#"["family","name",["label_a","label_b"],["value_a",403,0]]"#],
|
1069
|
+
values: &[1.5],
|
1070
|
+
pids: &["worker-1"],
|
1071
|
+
expected_out: None,
|
1072
|
+
expected_err: Some(MmapError::legacy(
|
1073
|
+
"Processed entries 0 != map entries 1".to_owned(),
|
1074
|
+
RubyError::Runtime,
|
1075
|
+
)),
|
1076
|
+
},
|
1077
|
+
TestCase {
|
1078
|
+
name: "non-e letter in numeric",
|
1079
|
+
multiprocess_mode: "min",
|
1080
|
+
json: &[r#"["family","name",["label_a","label_b"],["value_a",-2.0c5]]"#],
|
1081
|
+
values: &[1.5],
|
1082
|
+
pids: &["worker-1"],
|
1083
|
+
expected_out: None,
|
1084
|
+
expected_err: Some(MmapError::legacy(
|
1085
|
+
"Processed entries 0 != map entries 1".to_owned(),
|
1086
|
+
RubyError::Runtime,
|
1087
|
+
)),
|
1088
|
+
},
|
1089
|
+
];
|
1090
|
+
|
1091
|
+
for case in tc {
|
1092
|
+
let name = case.name;
|
1093
|
+
|
1094
|
+
let input_bytes: Vec<BString> = case
|
1095
|
+
.json
|
1096
|
+
.iter()
|
1097
|
+
.zip(case.values)
|
1098
|
+
.map(|(&s, &value)| TestEntry::new(s, value).as_bstring())
|
1099
|
+
.collect();
|
1100
|
+
|
1101
|
+
let mut file_infos = Vec::new();
|
1102
|
+
for pid in case.pids {
|
1103
|
+
let TestFile {
|
1104
|
+
file,
|
1105
|
+
path,
|
1106
|
+
dir: _dir,
|
1107
|
+
} = TestFile::new(b"foobar");
|
1108
|
+
|
1109
|
+
let info = FileInfo {
|
1110
|
+
file,
|
1111
|
+
path,
|
1112
|
+
len: case.json.len(),
|
1113
|
+
multiprocess_mode: Symbol::new(case.multiprocess_mode),
|
1114
|
+
type_: Symbol::new("gauge"),
|
1115
|
+
pid: pid.to_string(),
|
1116
|
+
};
|
1117
|
+
file_infos.push(info);
|
1118
|
+
}
|
1119
|
+
|
1120
|
+
let file_entries: Vec<FileEntry> = input_bytes
|
1121
|
+
.iter()
|
1122
|
+
.map(|s| RawEntry::from_slice(s).unwrap())
|
1123
|
+
.zip(file_infos)
|
1124
|
+
.map(|(entry, info)| {
|
1125
|
+
let meta = EntryMetadata::new(&entry, &info).unwrap();
|
1126
|
+
let borrowed =
|
1127
|
+
BorrowedData::new(&entry, &info, meta.is_pid_significant()).unwrap();
|
1128
|
+
let data = EntryData::try_from(borrowed).unwrap();
|
1129
|
+
FileEntry { data, meta }
|
1130
|
+
})
|
1131
|
+
.collect();
|
1132
|
+
|
1133
|
+
let output = FileEntry::entries_to_string(file_entries);
|
1134
|
+
|
1135
|
+
if let Some(expected_out) = case.expected_out {
|
1136
|
+
assert_eq!(
|
1137
|
+
expected_out,
|
1138
|
+
output.as_ref().unwrap(),
|
1139
|
+
"test case: {name} - output"
|
1140
|
+
);
|
1141
|
+
}
|
1142
|
+
|
1143
|
+
if let Some(expected_err) = case.expected_err {
|
1144
|
+
assert_eq!(
|
1145
|
+
expected_err,
|
1146
|
+
output.unwrap_err(),
|
1147
|
+
"test case: {name} - error"
|
1148
|
+
);
|
1149
|
+
}
|
1150
|
+
}
|
1151
|
+
}
|
1152
|
+
|
1153
|
+
#[test]
|
1154
|
+
fn test_merge() {
|
1155
|
+
struct TestCase {
|
1156
|
+
name: &'static str,
|
1157
|
+
metric_type: &'static str,
|
1158
|
+
multiprocess_mode: &'static str,
|
1159
|
+
values: &'static [f64],
|
1160
|
+
expected_value: f64,
|
1161
|
+
}
|
1162
|
+
|
1163
|
+
let _cleanup = unsafe { magnus::embed::init() };
|
1164
|
+
let ruby = magnus::Ruby::get().unwrap();
|
1165
|
+
crate::init(&ruby).unwrap();
|
1166
|
+
|
1167
|
+
let tc = vec![
|
1168
|
+
TestCase {
|
1169
|
+
name: "gauge max",
|
1170
|
+
metric_type: "gauge",
|
1171
|
+
multiprocess_mode: "max",
|
1172
|
+
values: &[1.0, 5.0],
|
1173
|
+
expected_value: 5.0,
|
1174
|
+
},
|
1175
|
+
TestCase {
|
1176
|
+
name: "gauge min",
|
1177
|
+
metric_type: "gauge",
|
1178
|
+
multiprocess_mode: "min",
|
1179
|
+
values: &[1.0, 5.0],
|
1180
|
+
expected_value: 1.0,
|
1181
|
+
},
|
1182
|
+
TestCase {
|
1183
|
+
name: "gauge livesum",
|
1184
|
+
metric_type: "gauge",
|
1185
|
+
multiprocess_mode: "livesum",
|
1186
|
+
values: &[1.0, 5.0],
|
1187
|
+
expected_value: 6.0,
|
1188
|
+
},
|
1189
|
+
TestCase {
|
1190
|
+
name: "gauge all",
|
1191
|
+
metric_type: "gauge",
|
1192
|
+
multiprocess_mode: "all",
|
1193
|
+
values: &[1.0, 5.0],
|
1194
|
+
expected_value: 5.0,
|
1195
|
+
},
|
1196
|
+
TestCase {
|
1197
|
+
name: "not a gauge",
|
1198
|
+
metric_type: "histogram",
|
1199
|
+
multiprocess_mode: "max",
|
1200
|
+
values: &[1.0, 5.0],
|
1201
|
+
expected_value: 6.0,
|
1202
|
+
},
|
1203
|
+
];
|
1204
|
+
|
1205
|
+
for case in tc {
|
1206
|
+
let name = case.name;
|
1207
|
+
let json = r#"["family","metric",["label_a","label_b"],["value_a","value_b"]]"#;
|
1208
|
+
|
1209
|
+
let TestFile {
|
1210
|
+
file,
|
1211
|
+
path,
|
1212
|
+
dir: _dir,
|
1213
|
+
} = TestFile::new(b"foobar");
|
1214
|
+
|
1215
|
+
let info = FileInfo {
|
1216
|
+
file,
|
1217
|
+
path,
|
1218
|
+
len: json.len(),
|
1219
|
+
multiprocess_mode: Symbol::new(case.multiprocess_mode),
|
1220
|
+
type_: Symbol::new(case.metric_type),
|
1221
|
+
pid: "worker-1".to_string(),
|
1222
|
+
};
|
1223
|
+
|
1224
|
+
let input_bytes: Vec<BString> = case
|
1225
|
+
.values
|
1226
|
+
.iter()
|
1227
|
+
.map(|&value| TestEntry::new(json, value).as_bstring())
|
1228
|
+
.collect();
|
1229
|
+
|
1230
|
+
let entries: Vec<FileEntry> = input_bytes
|
1231
|
+
.iter()
|
1232
|
+
.map(|s| RawEntry::from_slice(s).unwrap())
|
1233
|
+
.map(|entry| {
|
1234
|
+
let meta = EntryMetadata::new(&entry, &info).unwrap();
|
1235
|
+
let borrowed =
|
1236
|
+
BorrowedData::new(&entry, &info, meta.is_pid_significant()).unwrap();
|
1237
|
+
let data = EntryData::try_from(borrowed).unwrap();
|
1238
|
+
FileEntry { data, meta }
|
1239
|
+
})
|
1240
|
+
.collect();
|
1241
|
+
|
1242
|
+
let mut entry_a = entries[0].clone();
|
1243
|
+
let entry_b = entries[1].clone();
|
1244
|
+
entry_a.meta.merge(&entry_b.meta);
|
1245
|
+
|
1246
|
+
assert_eq!(
|
1247
|
+
case.expected_value, entry_a.meta.value.unwrap(),
|
1248
|
+
"test case: {name} - value"
|
1249
|
+
);
|
1250
|
+
}
|
1251
|
+
}
|
1252
|
+
}
|