vinted-prometheus-client-mmap 1.2.1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (52) hide show
  1. checksums.yaml +7 -0
  2. data/README.md +2 -0
  3. data/ext/fast_mmaped_file_rs/Cargo.toml +40 -0
  4. data/ext/fast_mmaped_file_rs/README.md +52 -0
  5. data/ext/fast_mmaped_file_rs/build.rs +7 -0
  6. data/ext/fast_mmaped_file_rs/extconf.rb +28 -0
  7. data/ext/fast_mmaped_file_rs/src/error.rs +174 -0
  8. data/ext/fast_mmaped_file_rs/src/exemplars.rs +25 -0
  9. data/ext/fast_mmaped_file_rs/src/file_entry.rs +1190 -0
  10. data/ext/fast_mmaped_file_rs/src/file_info.rs +240 -0
  11. data/ext/fast_mmaped_file_rs/src/lib.rs +87 -0
  12. data/ext/fast_mmaped_file_rs/src/macros.rs +14 -0
  13. data/ext/fast_mmaped_file_rs/src/map.rs +492 -0
  14. data/ext/fast_mmaped_file_rs/src/metrics.proto +153 -0
  15. data/ext/fast_mmaped_file_rs/src/mmap/inner.rs +704 -0
  16. data/ext/fast_mmaped_file_rs/src/mmap.rs +896 -0
  17. data/ext/fast_mmaped_file_rs/src/raw_entry.rs +473 -0
  18. data/ext/fast_mmaped_file_rs/src/testhelper.rs +222 -0
  19. data/ext/fast_mmaped_file_rs/src/util.rs +121 -0
  20. data/lib/.DS_Store +0 -0
  21. data/lib/prometheus/.DS_Store +0 -0
  22. data/lib/prometheus/client/configuration.rb +23 -0
  23. data/lib/prometheus/client/counter.rb +27 -0
  24. data/lib/prometheus/client/formats/protobuf.rb +92 -0
  25. data/lib/prometheus/client/formats/text.rb +85 -0
  26. data/lib/prometheus/client/gauge.rb +40 -0
  27. data/lib/prometheus/client/helper/entry_parser.rb +132 -0
  28. data/lib/prometheus/client/helper/file_locker.rb +50 -0
  29. data/lib/prometheus/client/helper/json_parser.rb +23 -0
  30. data/lib/prometheus/client/helper/metrics_processing.rb +45 -0
  31. data/lib/prometheus/client/helper/metrics_representation.rb +51 -0
  32. data/lib/prometheus/client/helper/mmaped_file.rb +64 -0
  33. data/lib/prometheus/client/helper/plain_file.rb +29 -0
  34. data/lib/prometheus/client/histogram.rb +80 -0
  35. data/lib/prometheus/client/label_set_validator.rb +85 -0
  36. data/lib/prometheus/client/metric.rb +80 -0
  37. data/lib/prometheus/client/mmaped_dict.rb +79 -0
  38. data/lib/prometheus/client/mmaped_value.rb +158 -0
  39. data/lib/prometheus/client/page_size.rb +17 -0
  40. data/lib/prometheus/client/push.rb +203 -0
  41. data/lib/prometheus/client/rack/collector.rb +88 -0
  42. data/lib/prometheus/client/rack/exporter.rb +102 -0
  43. data/lib/prometheus/client/registry.rb +65 -0
  44. data/lib/prometheus/client/simple_value.rb +31 -0
  45. data/lib/prometheus/client/summary.rb +69 -0
  46. data/lib/prometheus/client/support/puma.rb +44 -0
  47. data/lib/prometheus/client/support/unicorn.rb +35 -0
  48. data/lib/prometheus/client/uses_value_type.rb +20 -0
  49. data/lib/prometheus/client/version.rb +5 -0
  50. data/lib/prometheus/client.rb +58 -0
  51. data/lib/prometheus.rb +3 -0
  52. metadata +203 -0
@@ -0,0 +1,1190 @@
1
+ use core::panic;
2
+ use magnus::Symbol;
3
+ use serde::Deserialize;
4
+ use serde_json::value::RawValue;
5
+ use smallvec::SmallVec;
6
+ use std::fmt::Write;
7
+ use std::str;
8
+
9
+ use crate::error::{MmapError, RubyError};
10
+ use crate::file_info::FileInfo;
11
+ use crate::raw_entry::RawEntry;
12
+ use crate::Result;
13
+ use crate::{SYM_GAUGE, SYM_LIVESUM, SYM_MAX, SYM_MIN};
14
+ use std::io::Cursor;
15
+ use varint_rs::VarintWriter;
16
+
17
/// Protobuf message types generated at build time by `prost` (wired up in
/// `build.rs`); the nested modules mirror the `io.prometheus.client` protobuf
/// package path so generated types read as `io::prometheus::client::Metric`.
pub mod io {
    pub mod prometheus {
        pub mod client {
            include!(concat!(env!("OUT_DIR"), "/io.prometheus.client.rs"));
        }
    }
}
24
+
25
/// A metrics entry extracted from a `*.db` file.
#[derive(Clone, Debug)]
pub struct FileEntry {
    /// The entry's JSON payload and (optionally significant) worker pid;
    /// doubles as the key when entries are deduplicated in an `EntryMap`.
    pub data: EntryData,
    /// The metric value plus the type/multiprocess-mode symbols inherited
    /// from the file the entry was read from.
    pub meta: EntryMetadata,
}
31
+
32
/// String slices pointing to the fields of a borrowed `Entry`'s JSON data.
///
/// Deserialized zero-copy from the entry's JSON, which is a positional array
/// of the form `["family","name",[labels...],[values...]]` (see the tests in
/// this file). All slices borrow from the source JSON string.
#[derive(Deserialize, Debug, Clone)]
pub struct MetricText<'a> {
    pub family_name: &'a str,
    pub metric_name: &'a str,
    pub labels: SmallVec<[&'a str; 4]>,
    // Values are kept as raw JSON tokens (still quoted / `null` / numeric)
    // so the exposition writers can decide how to quote them.
    #[serde(borrow)]
    pub values: SmallVec<[&'a RawValue; 4]>,
}
41
+
42
/// The primary data payload for a `FileEntry`, the JSON string and the
/// associated pid, if significant. Used as the key for `EntryMap`.
#[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Clone, Debug)]
pub struct EntryData {
    /// The raw JSON describing the metric (family, name, labels, values).
    pub json: String,
    /// Worker pid; `None` when the pid does not distinguish entries
    /// (see `EntryMetadata::is_pid_significant`).
    pub pid: Option<String>,
}
49
+
50
+ impl<'a> PartialEq<BorrowedData<'a>> for EntryData {
51
+ fn eq(&self, other: &BorrowedData) -> bool {
52
+ self.pid.as_deref() == other.pid && self.json == other.json
53
+ }
54
+ }
55
+
56
+ impl<'a> TryFrom<BorrowedData<'a>> for EntryData {
57
+ type Error = MmapError;
58
+
59
+ fn try_from(borrowed: BorrowedData) -> Result<Self> {
60
+ let mut json = String::new();
61
+ if json.try_reserve_exact(borrowed.json.len()).is_err() {
62
+ return Err(MmapError::OutOfMemory(borrowed.json.len()));
63
+ }
64
+ json.push_str(borrowed.json);
65
+
66
+ Ok(Self {
67
+ json,
68
+ // Don't bother checking for allocation failure, typically ~10 bytes
69
+ pid: borrowed.pid.map(|p| p.to_string()),
70
+ })
71
+ }
72
+ }
73
+
74
/// A borrowed copy of the JSON string and pid for a `FileEntry`. We use this
/// to check if a given string/pid combination is present in the `EntryMap`,
/// copying them to owned values only when needed.
#[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Clone, Debug)]
pub struct BorrowedData<'a> {
    /// JSON payload borrowed from the mmap'd entry.
    pub json: &'a str,
    /// Pid borrowed from the owning `FileInfo`, when significant.
    pub pid: Option<&'a str>,
}
82
+
83
+ impl<'a> BorrowedData<'a> {
84
+ pub fn new(
85
+ raw_entry: &'a RawEntry,
86
+ file_info: &'a FileInfo,
87
+ pid_significant: bool,
88
+ ) -> Result<Self> {
89
+ let json = str::from_utf8(raw_entry.json())
90
+ .map_err(|e| MmapError::Encoding(format!("invalid UTF-8 in entry JSON: {e}")))?;
91
+
92
+ let pid = if pid_significant {
93
+ Some(file_info.pid.as_str())
94
+ } else {
95
+ None
96
+ };
97
+
98
+ Ok(Self { json, pid })
99
+ }
100
+ }
101
+
102
/// The metadata associated with a `FileEntry`. The value in `EntryMap`.
#[derive(Clone, Debug)]
pub struct EntryMetadata {
    /// Ruby symbol for the gauge multiprocess mode (`min`/`max`/`livesum`/...).
    pub multiprocess_mode: Symbol,
    /// Ruby symbol for the metric type (e.g. `gauge`, `counter`).
    pub type_: Symbol,
    /// The metric's current value.
    pub value: f64,
}
109
+
110
+ impl EntryMetadata {
111
+ /// Construct a new `FileEntry`, copying the JSON string from the `RawEntry`
112
+ /// into an internal buffer.
113
+ pub fn new(mmap_entry: &RawEntry, file: &FileInfo) -> Result<Self> {
114
+ let value = mmap_entry.value();
115
+
116
+ Ok(EntryMetadata {
117
+ multiprocess_mode: file.multiprocess_mode,
118
+ type_: file.type_,
119
+ value,
120
+ })
121
+ }
122
+
123
+ /// Combine values with another `EntryMetadata`.
124
+ pub fn merge(&mut self, other: &Self) {
125
+ if self.type_ == SYM_GAUGE {
126
+ match self.multiprocess_mode {
127
+ s if s == SYM_MIN => self.value = self.value.min(other.value),
128
+ s if s == SYM_MAX => self.value = self.value.max(other.value),
129
+ s if s == SYM_LIVESUM => self.value += other.value,
130
+ _ => self.value = other.value,
131
+ }
132
+ } else {
133
+ self.value += other.value;
134
+ }
135
+ }
136
+
137
+ /// Validate if pid is significant for metric.
138
+ pub fn is_pid_significant(&self) -> bool {
139
+ let mp = self.multiprocess_mode;
140
+
141
+ self.type_ == SYM_GAUGE && !(mp == SYM_MIN || mp == SYM_MAX || mp == SYM_LIVESUM)
142
+ }
143
+ }
144
+
145
+ use crate::io::prometheus::client::MetricType::{Counter, Gauge, Histogram, Summary};
146
+ use itertools::Itertools;
147
+ use prost::Message;
148
+ use std::collections::hash_map::DefaultHasher;
149
+ use std::collections::HashMap;
150
+ use std::hash::Hash;
151
+ use std::hash::Hasher;
152
+ use std::time::{SystemTime, UNIX_EPOCH};
153
+
154
+ use std::io::Write as OtherWrite;
155
+ impl FileEntry {
156
+ pub fn trim_quotes(s: &str) -> String {
157
+ let mut chars = s.chars();
158
+
159
+ if s.starts_with('"') {
160
+ chars.next();
161
+ }
162
+ if s.ends_with('"') {
163
+ chars.next_back();
164
+ }
165
+
166
+ chars.as_str().to_string()
167
+ }
168
+
169
+ pub fn entries_to_protobuf(entries: Vec<FileEntry>) -> Result<String> {
170
+ let mut buffer: Cursor<Vec<u8>> = Cursor::new(Vec::new());
171
+ let mut mtrcs: HashMap<u64, io::prometheus::client::Metric> = HashMap::new();
172
+ let mut metric_types = HashMap::new();
173
+ let mut metric_names = HashMap::new();
174
+
175
+ entries
176
+ .iter()
177
+ // TODO: Don't just unwrap. Handle the error gracefully.
178
+ .map(|v| {
179
+ (
180
+ v,
181
+ serde_json::from_str::<MetricText>(&v.data.json)
182
+ .expect("cannot parse json entry"),
183
+ v.meta.type_.name().expect("getting name").into_owned(),
184
+ )
185
+ })
186
+ .filter(|v| v.1.labels.len() == v.1.values.len())
187
+ .group_by(|v| v.1.family_name)
188
+ .into_iter()
189
+ .for_each(|(_, group)| {
190
+ // NOTE(GiedriusS): different dynamic labels fall under the same
191
+ // metric group.
192
+
193
+ 'outer: for gr in group {
194
+ let metric_type = gr.2;
195
+
196
+ let lbls =
197
+ gr.1.labels
198
+ .iter()
199
+ .map(|l| Self::trim_quotes(l))
200
+ .zip(gr.1.values.iter().map(|v| Self::trim_quotes(v.get())));
201
+
202
+ let mut m = io::prometheus::client::Metric {
203
+ label: lbls
204
+ .clone()
205
+ .map(|l| io::prometheus::client::LabelPair {
206
+ name: Some(l.0),
207
+ value: Some(l.1.to_string()),
208
+ })
209
+ .collect::<Vec<io::prometheus::client::LabelPair>>(),
210
+ gauge: None,
211
+ counter: None,
212
+ summary: None,
213
+ untyped: None,
214
+ histogram: None,
215
+ timestamp_ms: None,
216
+ };
217
+
218
+ match metric_type.as_str() {
219
+ "counter" => {
220
+ let mut hasher = DefaultHasher::new();
221
+
222
+ // Iterate over the tuples and hash their elements
223
+ for (a, b) in lbls {
224
+ a.hash(&mut hasher);
225
+ b.hash(&mut hasher);
226
+ }
227
+ "counter".hash(&mut hasher);
228
+
229
+ // Get the final u64 hash value
230
+ let hash_value = hasher.finish();
231
+
232
+ let start = SystemTime::now();
233
+ let since_the_epoch = start
234
+ .duration_since(UNIX_EPOCH)
235
+ .expect("Time went backwards");
236
+
237
+ m.counter = Some(io::prometheus::client::Counter {
238
+ value: Some(gr.0.meta.value),
239
+ created_timestamp: None,
240
+ exemplar: Some(io::prometheus::client::Exemplar{
241
+ label: vec![
242
+ io::prometheus::client::LabelPair {
243
+ name: Some("traceID".to_string()),
244
+ value: Some("123456789".to_string()),
245
+ }
246
+ ],
247
+ value: Some(gr.0.meta.value),
248
+ timestamp: Some(prost_types::Timestamp { seconds:since_the_epoch.as_secs() as i64 , nanos: since_the_epoch.as_nanos() as i32 }),
249
+ }),
250
+ });
251
+
252
+ mtrcs.insert(hash_value, m);
253
+ metric_types.insert(hash_value, "counter");
254
+ metric_names.insert(hash_value, gr.1.metric_name);
255
+ }
256
+ "gauge" => {
257
+ let mut hasher = DefaultHasher::new();
258
+
259
+ // Iterate over the tuples and hash their elements
260
+ for (a, b) in lbls {
261
+ a.hash(&mut hasher);
262
+ b.hash(&mut hasher);
263
+ }
264
+ "gauge".hash(&mut hasher);
265
+
266
+ let hash_value = hasher.finish();
267
+
268
+ m.gauge = Some(io::prometheus::client::Gauge {
269
+ value: Some(gr.0.meta.value),
270
+ });
271
+ mtrcs.insert(hash_value, m);
272
+ metric_types.insert(hash_value, "gauge");
273
+ metric_names.insert(hash_value, gr.1.metric_name);
274
+ }
275
+ "histogram" => {
276
+ let mut hasher = DefaultHasher::new();
277
+
278
+ let mut le: Option<f64> = None;
279
+
280
+ // Iterate over the tuples and hash their elements
281
+ for (a, b) in lbls {
282
+ if a != "le" {
283
+ a.hash(&mut hasher);
284
+ b.hash(&mut hasher);
285
+ }
286
+
287
+ // Safe to ignore +Inf bound.
288
+ if a == "le" {
289
+ if b == "+Inf" {
290
+ continue 'outer;
291
+ }
292
+ let leparsed = b.parse::<f64>();
293
+ match leparsed {
294
+ Ok(p) => le = Some(p),
295
+ Err(e) => panic!("failed to parse {} due to {}", b, e),
296
+ }
297
+ }
298
+ }
299
+ "histogram".hash(&mut hasher);
300
+
301
+ let hash_value = hasher.finish();
302
+
303
+ match mtrcs.get_mut(&hash_value) {
304
+ Some(v) => {
305
+ let hs =
306
+ v.histogram.as_mut().expect("getting mutable histogram");
307
+
308
+ for bucket in &mut hs.bucket {
309
+ if bucket.upper_bound != le {
310
+ continue;
311
+ }
312
+
313
+ let mut curf: f64 =
314
+ bucket.cumulative_count_float.unwrap_or_default();
315
+ curf += gr.0.meta.value;
316
+
317
+ bucket.cumulative_count_float = Some(curf);
318
+ }
319
+ }
320
+ None => {
321
+ let mut final_metric_name = gr.1.metric_name;
322
+
323
+ if let Some(stripped) =
324
+ final_metric_name.strip_suffix("_bucket")
325
+ {
326
+ final_metric_name = stripped;
327
+ }
328
+ if let Some(stripped) = final_metric_name.strip_suffix("_sum") {
329
+ final_metric_name = stripped;
330
+ }
331
+ if let Some(stripped) = final_metric_name.strip_suffix("_count")
332
+ {
333
+ final_metric_name = stripped;
334
+ }
335
+
336
+ let buckets = vec![io::prometheus::client::Bucket {
337
+ cumulative_count: None,
338
+ cumulative_count_float: Some(gr.0.meta.value),
339
+ upper_bound: Some(
340
+ le.expect(
341
+ &format!("got no LE for {}", gr.1.metric_name)
342
+ .to_string(),
343
+ ),
344
+ ),
345
+ exemplar: None,
346
+ }];
347
+ m.label = m
348
+ .label
349
+ .into_iter()
350
+ .filter(|l| l.name != Some("le".to_string()))
351
+ .collect_vec();
352
+ // Create a new metric.
353
+ m.histogram = Some(io::prometheus::client::Histogram {
354
+ // All native histogram fields.
355
+ sample_count: None,
356
+ sample_count_float: None,
357
+ sample_sum: None,
358
+ created_timestamp: None,
359
+ schema: None,
360
+ zero_count: None,
361
+ zero_count_float: None,
362
+ zero_threshold: None,
363
+ negative_count: vec![],
364
+ negative_delta: vec![],
365
+ negative_span: vec![],
366
+ positive_count: vec![],
367
+ positive_delta: vec![],
368
+ positive_span: vec![],
369
+ // All classic histogram fields.
370
+ bucket: buckets,
371
+ });
372
+ mtrcs.insert(hash_value, m);
373
+ metric_types.insert(hash_value, "histogram");
374
+ metric_names.insert(hash_value, final_metric_name);
375
+ }
376
+ }
377
+ }
378
+ "summary" => {
379
+ let mut hasher = DefaultHasher::new();
380
+
381
+ let mut quantile: Option<f64> = None;
382
+
383
+ // Iterate over the tuples and hash their elements
384
+ for (a, b) in lbls {
385
+ if a != "quantile" {
386
+ a.hash(&mut hasher);
387
+ b.hash(&mut hasher);
388
+ }
389
+ if a == "quantile" {
390
+ let quantileparsed = b.parse::<f64>();
391
+ match quantileparsed {
392
+ Ok(p) => quantile = Some(p),
393
+ Err(e) => {
394
+ panic!("failed to parse quantile {} due to {}", b, e)
395
+ }
396
+ }
397
+ }
398
+ }
399
+ "summary".hash(&mut hasher);
400
+ let hash_value = hasher.finish();
401
+
402
+ match mtrcs.get_mut(&hash_value) {
403
+ Some(v) => {
404
+ // Go through and edit buckets.
405
+ let smry = v.summary.as_mut().expect(
406
+ &format!(
407
+ "getting mutable summary for {}",
408
+ gr.1.metric_name
409
+ )
410
+ .to_string(),
411
+ );
412
+
413
+ if gr.1.metric_name.ends_with("_count") {
414
+ let samplecount = smry.sample_count.unwrap_or_default();
415
+ smry.sample_count =
416
+ Some((gr.0.meta.value as u64) + samplecount);
417
+ } else if gr.1.metric_name.ends_with("_sum") {
418
+ let samplesum: f64 = smry.sample_sum.unwrap_or_default();
419
+ smry.sample_sum = Some(gr.0.meta.value + samplesum);
420
+ } else {
421
+ let mut found_quantile = false;
422
+ for qntl in &mut smry.quantile {
423
+ if qntl.quantile != quantile {
424
+ continue;
425
+ }
426
+
427
+ let mut curq: f64 = qntl.quantile.unwrap_or_default();
428
+ curq += gr.0.meta.value;
429
+
430
+ qntl.quantile = Some(curq);
431
+ found_quantile = true;
432
+ }
433
+
434
+ if !found_quantile {
435
+ smry.quantile.push(io::prometheus::client::Quantile {
436
+ quantile: quantile,
437
+ value: Some(gr.0.meta.value),
438
+ });
439
+ }
440
+ }
441
+ }
442
+ None => {
443
+ m.label = m
444
+ .label
445
+ .into_iter()
446
+ .filter(|l| l.name != Some("quantile".to_string()))
447
+ .collect_vec();
448
+
449
+ let mut final_metric_name = gr.1.metric_name;
450
+ // If quantile then add to quantiles.
451
+ // if ends with _count then add it to count.
452
+ // If ends with _sum then add it to sum.
453
+ if gr.1.metric_name.ends_with("_count") {
454
+ final_metric_name =
455
+ gr.1.metric_name.strip_suffix("_count").unwrap();
456
+ m.summary = Some(io::prometheus::client::Summary {
457
+ quantile: vec![],
458
+ sample_count: Some(gr.0.meta.value as u64),
459
+ sample_sum: None,
460
+ created_timestamp: None,
461
+ });
462
+ } else if gr.1.metric_name.ends_with("_sum") {
463
+ final_metric_name =
464
+ gr.1.metric_name.strip_suffix("_sum").unwrap();
465
+ m.summary = Some(io::prometheus::client::Summary {
466
+ quantile: vec![],
467
+ sample_sum: Some(gr.0.meta.value),
468
+ sample_count: None,
469
+ created_timestamp: None,
470
+ });
471
+ } else {
472
+ let quantiles = vec![io::prometheus::client::Quantile {
473
+ quantile: quantile,
474
+ value: Some(gr.0.meta.value),
475
+ }];
476
+ m.summary = Some(io::prometheus::client::Summary {
477
+ quantile: quantiles,
478
+ sample_count: None,
479
+ sample_sum: None,
480
+ created_timestamp: None,
481
+ });
482
+ }
483
+
484
+ mtrcs.insert(hash_value, m);
485
+ metric_types.insert(hash_value, "summary");
486
+ metric_names.insert(hash_value, final_metric_name);
487
+ }
488
+ }
489
+ }
490
+ mtype => {
491
+ panic!("unhandled metric type {}", mtype)
492
+ }
493
+ }
494
+ }
495
+ });
496
+
497
+ mtrcs.iter().for_each(|mtrc| {
498
+ let metric_name = metric_names.get(mtrc.0).expect("getting metric name");
499
+ let metric_type = metric_types.get(mtrc.0).expect("getting metric type");
500
+
501
+ let protobuf_mf = io::prometheus::client::MetricFamily {
502
+ name: Some(metric_name.to_string()),
503
+ help: Some("Multiprocess metric".to_string()),
504
+ r#type: match metric_type.to_string().as_str() {
505
+ "counter" => Some(Counter.into()),
506
+ "gauge" => Some(Gauge.into()),
507
+ "histogram" => Some(Histogram.into()),
508
+ "summary" => Some(Summary.into()),
509
+ mtype => panic!("unhandled metric type {}", mtype),
510
+ },
511
+ metric: vec![mtrc.1.clone()],
512
+ };
513
+
514
+ let encoded_mf = protobuf_mf.encode_to_vec();
515
+
516
+ buffer
517
+ .write_u32_varint(
518
+ encoded_mf
519
+ .len()
520
+ .try_into()
521
+ .expect("failed to encode metricfamily"),
522
+ )
523
+ .unwrap();
524
+ buffer
525
+ .write_all(&encoded_mf)
526
+ .expect("failed to write output");
527
+ });
528
+
529
+ // NOTE: Rust strings are bytes encoded in UTF-8. Ruby doesn't have such
530
+ // invariant. So, let's convert those bytes to a string since everything ends
531
+ // up as a string in Ruby.
532
+ unsafe { Ok(str::from_utf8_unchecked(buffer.get_ref()).to_string()) }
533
+ }
534
+
535
+ /// Convert the sorted entries into a String in Prometheus metrics format.
536
+ pub fn entries_to_string(entries: Vec<FileEntry>) -> Result<String> {
537
+ // We guesstimate that lines are ~100 bytes long, preallocate the string to
538
+ // roughly that size.
539
+ let mut out = String::new();
540
+ out.try_reserve(entries.len() * 128)
541
+ .map_err(|_| MmapError::OutOfMemory(entries.len() * 128))?;
542
+
543
+ let mut prev_name: Option<String> = None;
544
+
545
+ let entry_count = entries.len();
546
+ let mut processed_count = 0;
547
+
548
+ for entry in entries {
549
+ let metrics_data = match serde_json::from_str::<MetricText>(&entry.data.json) {
550
+ Ok(m) => {
551
+ if m.labels.len() != m.values.len() {
552
+ continue;
553
+ }
554
+ m
555
+ }
556
+ // We don't exit the function here so the total number of invalid
557
+ // entries can be calculated below.
558
+ Err(_) => continue,
559
+ };
560
+
561
+ match prev_name.as_ref() {
562
+ Some(p) if p == metrics_data.family_name => {}
563
+ _ => {
564
+ entry.append_header(metrics_data.family_name, &mut out);
565
+ prev_name = Some(metrics_data.family_name.to_owned());
566
+ }
567
+ }
568
+
569
+ entry.append_entry(metrics_data, &mut out)?;
570
+
571
+ writeln!(&mut out, " {}", entry.meta.value)
572
+ .map_err(|e| MmapError::Other(format!("Failed to append to output: {e}")))?;
573
+
574
+ processed_count += 1;
575
+ }
576
+
577
+ if processed_count != entry_count {
578
+ return Err(MmapError::legacy(
579
+ format!("Processed entries {processed_count} != map entries {entry_count}"),
580
+ RubyError::Runtime,
581
+ ));
582
+ }
583
+
584
+ Ok(out)
585
+ }
586
+
587
+ fn append_header(&self, family_name: &str, out: &mut String) {
588
+ out.push_str("# HELP ");
589
+ out.push_str(family_name);
590
+ out.push_str(" Multiprocess metric\n");
591
+
592
+ out.push_str("# TYPE ");
593
+ out.push_str(family_name);
594
+ out.push(' ');
595
+
596
+ out.push_str(&self.meta.type_.name().expect("name was invalid UTF-8"));
597
+ out.push('\n');
598
+ }
599
+
600
    /// Append one series in text format: `name{label="value",...,pid="..."}`.
    /// The sample value itself is appended by the caller
    /// (`entries_to_string`), which follows this with ` <value>\n`.
    fn append_entry(&self, json_data: MetricText, out: &mut String) -> Result<()> {
        out.push_str(json_data.metric_name);

        if json_data.labels.is_empty() {
            // No labels: emit `{pid="..."}` only when the pid is significant,
            // otherwise the bare metric name.
            if let Some(pid) = self.data.pid.as_ref() {
                out.push_str("{pid=\"");
                out.push_str(pid);
                out.push_str("\"}");
            }

            return Ok(());
        }

        out.push('{');

        let it = json_data.labels.iter().zip(json_data.values.iter());

        for (i, (&key, val)) in it.enumerate() {
            out.push_str(key);
            out.push('=');

            // `val` is a raw JSON token: strings arrive already quoted and are
            // copied as-is, JSON `null` becomes an empty quoted string, and
            // anything else (numbers) is wrapped in quotes here.
            match val.get() {
                "null" => out.push_str("\"\""),
                s if s.starts_with('"') => out.push_str(s),
                s => {
                    // Quote numeric values.
                    out.push('"');
                    out.push_str(s);
                    out.push('"');
                }
            }

            // Comma between labels but not after the last one; the pid label
            // below supplies its own leading comma.
            if i < json_data.labels.len() - 1 {
                out.push(',');
            }
        }

        if let Some(pid) = self.data.pid.as_ref() {
            out.push_str(",pid=\"");
            out.push_str(pid);
            out.push('"');
        }

        out.push('}');

        Ok(())
    }
647
+ }
648
+
649
+ #[cfg(test)]
650
+ mod test {
651
+ use bstr::BString;
652
+ use indoc::indoc;
653
+
654
+ use super::*;
655
+ use crate::file_info::FileInfo;
656
+ use crate::raw_entry::RawEntry;
657
+ use crate::testhelper::{TestEntry, TestFile};
658
+
659
+ #[test]
660
+ fn test_trim_quotes() {
661
+ assert_eq!("foo", FileEntry::trim_quotes("foo"));
662
+ assert_eq!("foo", FileEntry::trim_quotes("\"foo\""));
663
+ }
664
+
665
+ #[test]
666
+ fn test_entries_to_string() {
667
+ struct TestCase {
668
+ name: &'static str,
669
+ multiprocess_mode: &'static str,
670
+ json: &'static [&'static str],
671
+ values: &'static [f64],
672
+ pids: &'static [&'static str],
673
+ expected_out: Option<&'static str>,
674
+ expected_err: Option<MmapError>,
675
+ }
676
+
677
+ let _cleanup = unsafe { magnus::embed::init() };
678
+ let ruby = magnus::Ruby::get().unwrap();
679
+ crate::init(&ruby).unwrap();
680
+
681
+ let tc = vec![
682
+ TestCase {
683
+ name: "one metric, pid significant",
684
+ multiprocess_mode: "all",
685
+ json: &[r#"["family","name",["label_a","label_b"],["value_a","value_b"]]"#],
686
+ values: &[1.0],
687
+ pids: &["worker-1"],
688
+ expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
689
+ # TYPE family gauge
690
+ name{label_a="value_a",label_b="value_b",pid="worker-1"} 1
691
+ "##}),
692
+ expected_err: None,
693
+ },
694
+ TestCase {
695
+ name: "one metric, no pid",
696
+ multiprocess_mode: "min",
697
+ json: &[r#"["family","name",["label_a","label_b"],["value_a","value_b"]]"#],
698
+ values: &[1.0],
699
+ pids: &["worker-1"],
700
+ expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
701
+ # TYPE family gauge
702
+ name{label_a="value_a",label_b="value_b"} 1
703
+ "##}),
704
+ expected_err: None,
705
+ },
706
+ TestCase {
707
+ name: "many labels",
708
+ multiprocess_mode: "min",
709
+ json: &[
710
+ r#"["family","name",["label_a","label_b","label_c","label_d","label_e"],["value_a","value_b","value_c","value_d","value_e"]]"#,
711
+ ],
712
+ values: &[1.0],
713
+ pids: &["worker-1"],
714
+ expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
715
+ # TYPE family gauge
716
+ name{label_a="value_a",label_b="value_b",label_c="value_c",label_d="value_d",label_e="value_e"} 1
717
+ "##}),
718
+ expected_err: None,
719
+ },
720
+ TestCase {
721
+ name: "floating point shown",
722
+ multiprocess_mode: "min",
723
+ json: &[r#"["family","name",["label_a","label_b"],["value_a","value_b"]]"#],
724
+ values: &[1.5],
725
+ pids: &["worker-1"],
726
+ expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
727
+ # TYPE family gauge
728
+ name{label_a="value_a",label_b="value_b"} 1.5
729
+ "##}),
730
+ expected_err: None,
731
+ },
732
+ TestCase {
733
+ name: "numeric value",
734
+ multiprocess_mode: "min",
735
+ json: &[
736
+ r#"["family","name",["label_a","label_b","label_c"],["value_a",403,-0.2E5]]"#,
737
+ ],
738
+ values: &[1.5],
739
+ pids: &["worker-1"],
740
+ expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
741
+ # TYPE family gauge
742
+ name{label_a="value_a",label_b="403",label_c="-0.2E5"} 1.5
743
+ "##}),
744
+ expected_err: None,
745
+ },
746
+ TestCase {
747
+ name: "null value",
748
+ multiprocess_mode: "min",
749
+ json: &[r#"["family","name",["label_a","label_b"],["value_a",null]]"#],
750
+ values: &[1.5],
751
+ pids: &["worker-1"],
752
+ expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
753
+ # TYPE family gauge
754
+ name{label_a="value_a",label_b=""} 1.5
755
+ "##}),
756
+ expected_err: None,
757
+ },
758
+ TestCase {
759
+ name: "comma in value",
760
+ multiprocess_mode: "min",
761
+ json: &[r#"["family","name",["label_a","label_b"],["value_a","value,_b"]]"#],
762
+ values: &[1.5],
763
+ pids: &["worker-1"],
764
+ expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
765
+ # TYPE family gauge
766
+ name{label_a="value_a",label_b="value,_b"} 1.5
767
+ "##}),
768
+ expected_err: None,
769
+ },
770
+ TestCase {
771
+ name: "no labels, pid significant",
772
+ multiprocess_mode: "all",
773
+ json: &[r#"["family","name",[],[]]"#],
774
+ values: &[1.0],
775
+ pids: &["worker-1"],
776
+ expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
777
+ # TYPE family gauge
778
+ name{pid="worker-1"} 1
779
+ "##}),
780
+ expected_err: None,
781
+ },
782
+ TestCase {
783
+ name: "no labels, no pid",
784
+ multiprocess_mode: "min",
785
+ json: &[r#"["family","name",[],[]]"#],
786
+ values: &[1.0],
787
+ pids: &["worker-1"],
788
+ expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
789
+ # TYPE family gauge
790
+ name 1
791
+ "##}),
792
+ expected_err: None,
793
+ },
794
+ TestCase {
795
+ name: "two metrics, same family, pid significant",
796
+ multiprocess_mode: "all",
797
+ json: &[
798
+ r#"["family","first",["label_a","label_b"],["value_a","value_b"]]"#,
799
+ r#"["family","second",["label_a","label_b"],["value_a","value_b"]]"#,
800
+ ],
801
+ values: &[1.0, 2.0],
802
+ pids: &["worker-1", "worker-1"],
803
+ expected_out: Some(indoc! {r##"# HELP family Multiprocess metric
804
+ # TYPE family gauge
805
+ first{label_a="value_a",label_b="value_b",pid="worker-1"} 1
806
+ second{label_a="value_a",label_b="value_b",pid="worker-1"} 2
807
+ "##}),
808
+ expected_err: None,
809
+ },
810
+ TestCase {
811
+ name: "two metrics, different family, pid significant",
812
+ multiprocess_mode: "min",
813
+ json: &[
814
+ r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
815
+ r#"["second_family","second_name",["label_a","label_b"],["value_a","value_b"]]"#,
816
+ ],
817
+ values: &[1.0, 2.0],
818
+ pids: &["worker-1", "worker-1"],
819
+ expected_out: Some(indoc! {r##"# HELP first_family Multiprocess metric
820
+ # TYPE first_family gauge
821
+ first_name{label_a="value_a",label_b="value_b"} 1
822
+ # HELP second_family Multiprocess metric
823
+ # TYPE second_family gauge
824
+ second_name{label_a="value_a",label_b="value_b"} 2
825
+ "##}),
826
+ expected_err: None,
827
+ },
828
+ TestCase {
829
+ name: "three metrics, two different families, pid significant",
830
+ multiprocess_mode: "all",
831
+ json: &[
832
+ r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
833
+ r#"["first_family","second_name",["label_a","label_b"],["value_a","value_b"]]"#,
834
+ r#"["second_family","second_name",["label_a","label_b"],["value_a","value_b"]]"#,
835
+ ],
836
+ values: &[1.0, 2.0, 3.0],
837
+ pids: &["worker-1", "worker-1", "worker-1"],
838
+ expected_out: Some(indoc! {r##"# HELP first_family Multiprocess metric
839
+ # TYPE first_family gauge
840
+ first_name{label_a="value_a",label_b="value_b",pid="worker-1"} 1
841
+ second_name{label_a="value_a",label_b="value_b",pid="worker-1"} 2
842
+ # HELP second_family Multiprocess metric
843
+ # TYPE second_family gauge
844
+ second_name{label_a="value_a",label_b="value_b",pid="worker-1"} 3
845
+ "##}),
846
+ expected_err: None,
847
+ },
848
+ TestCase {
849
+ name: "same metrics, pid significant, separate workers",
850
+ multiprocess_mode: "all",
851
+ json: &[
852
+ r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
853
+ r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
854
+ ],
855
+ values: &[1.0, 2.0],
856
+ pids: &["worker-1", "worker-2"],
857
+ expected_out: Some(indoc! {r##"# HELP first_family Multiprocess metric
858
+ # TYPE first_family gauge
859
+ first_name{label_a="value_a",label_b="value_b",pid="worker-1"} 1
860
+ first_name{label_a="value_a",label_b="value_b",pid="worker-2"} 2
861
+ "##}),
862
+ expected_err: None,
863
+ },
864
+ TestCase {
865
+ name: "same metrics, pid not significant, separate workers",
866
+ multiprocess_mode: "max",
867
+ json: &[
868
+ r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
869
+ r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
870
+ ],
871
+ values: &[1.0, 2.0],
872
+ pids: &["worker-1", "worker-2"],
873
+ expected_out: Some(indoc! {r##"# HELP first_family Multiprocess metric
874
+ # TYPE first_family gauge
875
+ first_name{label_a="value_a",label_b="value_b"} 1
876
+ first_name{label_a="value_a",label_b="value_b"} 2
877
+ "##}),
878
+ expected_err: None,
879
+ },
880
+ TestCase {
881
+ name: "entry fails to parse",
882
+ multiprocess_mode: "min",
883
+ json: &[
884
+ r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#,
885
+ r#"[not valid"#,
886
+ ],
887
+ values: &[1.0, 2.0],
888
+ pids: &["worker-1", "worker-1"],
889
+ expected_out: None,
890
+ expected_err: Some(MmapError::legacy(
891
+ "Processed entries 1 != map entries 2".to_owned(),
892
+ RubyError::Runtime,
893
+ )),
894
+ },
895
+ TestCase {
896
+ name: "too many values",
897
+ multiprocess_mode: "min",
898
+ json: &[r#"["family","name",["label_a"],["value_a","value,_b"]]"#],
899
+ values: &[1.5],
900
+ pids: &["worker-1"],
901
+ expected_out: None,
902
+ expected_err: Some(MmapError::legacy(
903
+ "Processed entries 0 != map entries 1".to_owned(),
904
+ RubyError::Runtime,
905
+ )),
906
+ },
907
+ TestCase {
908
+ name: "no values",
909
+ multiprocess_mode: "min",
910
+ json: &[r#"["family","name",["label_a"]]"#],
911
+ values: &[1.5],
912
+ pids: &["worker-1"],
913
+ expected_out: None,
914
+ expected_err: Some(MmapError::legacy(
915
+ "Processed entries 0 != map entries 1".to_owned(),
916
+ RubyError::Runtime,
917
+ )),
918
+ },
919
+ TestCase {
920
+ name: "no labels or values",
921
+ multiprocess_mode: "min",
922
+ json: &[r#"["family","name","foo"]"#],
923
+ values: &[1.5],
924
+ pids: &["worker-1"],
925
+ expected_out: None,
926
+ expected_err: Some(MmapError::legacy(
927
+ "Processed entries 0 != map entries 1".to_owned(),
928
+ RubyError::Runtime,
929
+ )),
930
+ },
931
+ TestCase {
932
+ name: "too many leading brackets",
933
+ multiprocess_mode: "min",
934
+ json: &[r#"[["family","name",["label_a","label_b"],["value_a","value_b"]]"#],
935
+ values: &[1.5],
936
+ pids: &["worker-1"],
937
+ expected_out: None,
938
+ expected_err: Some(MmapError::legacy(
939
+ "Processed entries 0 != map entries 1".to_owned(),
940
+ RubyError::Runtime,
941
+ )),
942
+ },
943
+ TestCase {
944
+ name: "too many trailing brackets",
945
+ multiprocess_mode: "min",
946
+ json: &[r#"["family","name",["label_a","label_b"],["value_a","value_b"]]]"#],
947
+ values: &[1.5],
948
+ pids: &["worker-1"],
949
+ expected_out: None,
950
+ expected_err: Some(MmapError::legacy(
951
+ "Processed entries 0 != map entries 1".to_owned(),
952
+ RubyError::Runtime,
953
+ )),
954
+ },
955
+ TestCase {
956
+ name: "too many leading label brackets",
957
+ multiprocess_mode: "min",
958
+ json: &[r#"["family","name",[["label_a","label_b"],["value_a","value_b"]]"#],
959
+ values: &[1.5],
960
+ pids: &["worker-1"],
961
+ expected_out: None,
962
+ expected_err: Some(MmapError::legacy(
963
+ "Processed entries 0 != map entries 1".to_owned(),
964
+ RubyError::Runtime,
965
+ )),
966
+ },
967
+ TestCase {
968
+ name: "too many leading label brackets",
969
+ multiprocess_mode: "min",
970
+ json: &[r#"["family","name",[["label_a","label_b"],["value_a","value_b"]]"#],
971
+ values: &[1.5],
972
+ pids: &["worker-1"],
973
+ expected_out: None,
974
+ expected_err: Some(MmapError::legacy(
975
+ "Processed entries 0 != map entries 1".to_owned(),
976
+ RubyError::Runtime,
977
+ )),
978
+ },
979
+ TestCase {
980
+ name: "too many leading value brackets",
981
+ multiprocess_mode: "min",
982
+ json: &[r#"["family","name",["label_a","label_b"],[["value_a","value_b"]]"#],
983
+ values: &[1.5],
984
+ pids: &["worker-1"],
985
+ expected_out: None,
986
+ expected_err: Some(MmapError::legacy(
987
+ "Processed entries 0 != map entries 1".to_owned(),
988
+ RubyError::Runtime,
989
+ )),
990
+ },
991
+ TestCase {
992
+ name: "misplaced bracket",
993
+ multiprocess_mode: "min",
994
+ json: &[r#"["family","name",["label_a","label_b"],]["value_a","value_b"]]"#],
995
+ values: &[1.5],
996
+ pids: &["worker-1"],
997
+ expected_out: None,
998
+ expected_err: Some(MmapError::legacy(
999
+ "Processed entries 0 != map entries 1".to_owned(),
1000
+ RubyError::Runtime,
1001
+ )),
1002
+ },
1003
+ TestCase {
1004
+ name: "comma in numeric",
1005
+ multiprocess_mode: "min",
1006
+ json: &[r#"["family","name",["label_a","label_b"],["value_a",403,0]]"#],
1007
+ values: &[1.5],
1008
+ pids: &["worker-1"],
1009
+ expected_out: None,
1010
+ expected_err: Some(MmapError::legacy(
1011
+ "Processed entries 0 != map entries 1".to_owned(),
1012
+ RubyError::Runtime,
1013
+ )),
1014
+ },
1015
+ TestCase {
1016
+ name: "non-e letter in numeric",
1017
+ multiprocess_mode: "min",
1018
+ json: &[r#"["family","name",["label_a","label_b"],["value_a",-2.0c5]]"#],
1019
+ values: &[1.5],
1020
+ pids: &["worker-1"],
1021
+ expected_out: None,
1022
+ expected_err: Some(MmapError::legacy(
1023
+ "Processed entries 0 != map entries 1".to_owned(),
1024
+ RubyError::Runtime,
1025
+ )),
1026
+ },
1027
+ ];
1028
+
1029
+ for case in tc {
1030
+ let name = case.name;
1031
+
1032
+ let input_bytes: Vec<BString> = case
1033
+ .json
1034
+ .iter()
1035
+ .zip(case.values)
1036
+ .map(|(&s, &value)| TestEntry::new(s, value).as_bstring())
1037
+ .collect();
1038
+
1039
+ let mut file_infos = Vec::new();
1040
+ for pid in case.pids {
1041
+ let TestFile {
1042
+ file,
1043
+ path,
1044
+ dir: _dir,
1045
+ } = TestFile::new(b"foobar");
1046
+
1047
+ let info = FileInfo {
1048
+ file,
1049
+ path,
1050
+ len: case.json.len(),
1051
+ multiprocess_mode: Symbol::new(case.multiprocess_mode),
1052
+ type_: Symbol::new("gauge"),
1053
+ pid: pid.to_string(),
1054
+ };
1055
+ file_infos.push(info);
1056
+ }
1057
+
1058
+ let file_entries: Vec<FileEntry> = input_bytes
1059
+ .iter()
1060
+ .map(|s| RawEntry::from_slice(s).unwrap())
1061
+ .zip(file_infos)
1062
+ .map(|(entry, info)| {
1063
+ let meta = EntryMetadata::new(&entry, &info).unwrap();
1064
+ let borrowed =
1065
+ BorrowedData::new(&entry, &info, meta.is_pid_significant()).unwrap();
1066
+ let data = EntryData::try_from(borrowed).unwrap();
1067
+ FileEntry { data, meta }
1068
+ })
1069
+ .collect();
1070
+
1071
+ let output = FileEntry::entries_to_string(file_entries);
1072
+
1073
+ if let Some(expected_out) = case.expected_out {
1074
+ assert_eq!(
1075
+ expected_out,
1076
+ output.as_ref().unwrap(),
1077
+ "test case: {name} - output"
1078
+ );
1079
+ }
1080
+
1081
+ if let Some(expected_err) = case.expected_err {
1082
+ assert_eq!(
1083
+ expected_err,
1084
+ output.unwrap_err(),
1085
+ "test case: {name} - error"
1086
+ );
1087
+ }
1088
+ }
1089
+ }
1090
+
1091
+ #[test]
1092
+ fn test_merge() {
1093
+ struct TestCase {
1094
+ name: &'static str,
1095
+ metric_type: &'static str,
1096
+ multiprocess_mode: &'static str,
1097
+ values: &'static [f64],
1098
+ expected_value: f64,
1099
+ }
1100
+
1101
+ let _cleanup = unsafe { magnus::embed::init() };
1102
+ let ruby = magnus::Ruby::get().unwrap();
1103
+ crate::init(&ruby).unwrap();
1104
+
1105
+ let tc = vec![
1106
+ TestCase {
1107
+ name: "gauge max",
1108
+ metric_type: "gauge",
1109
+ multiprocess_mode: "max",
1110
+ values: &[1.0, 5.0],
1111
+ expected_value: 5.0,
1112
+ },
1113
+ TestCase {
1114
+ name: "gauge min",
1115
+ metric_type: "gauge",
1116
+ multiprocess_mode: "min",
1117
+ values: &[1.0, 5.0],
1118
+ expected_value: 1.0,
1119
+ },
1120
+ TestCase {
1121
+ name: "gauge livesum",
1122
+ metric_type: "gauge",
1123
+ multiprocess_mode: "livesum",
1124
+ values: &[1.0, 5.0],
1125
+ expected_value: 6.0,
1126
+ },
1127
+ TestCase {
1128
+ name: "gauge all",
1129
+ metric_type: "gauge",
1130
+ multiprocess_mode: "all",
1131
+ values: &[1.0, 5.0],
1132
+ expected_value: 5.0,
1133
+ },
1134
+ TestCase {
1135
+ name: "not a gauge",
1136
+ metric_type: "histogram",
1137
+ multiprocess_mode: "max",
1138
+ values: &[1.0, 5.0],
1139
+ expected_value: 6.0,
1140
+ },
1141
+ ];
1142
+
1143
+ for case in tc {
1144
+ let name = case.name;
1145
+ let json = r#"["family","metric",["label_a","label_b"],["value_a","value_b"]]"#;
1146
+
1147
+ let TestFile {
1148
+ file,
1149
+ path,
1150
+ dir: _dir,
1151
+ } = TestFile::new(b"foobar");
1152
+
1153
+ let info = FileInfo {
1154
+ file,
1155
+ path,
1156
+ len: json.len(),
1157
+ multiprocess_mode: Symbol::new(case.multiprocess_mode),
1158
+ type_: Symbol::new(case.metric_type),
1159
+ pid: "worker-1".to_string(),
1160
+ };
1161
+
1162
+ let input_bytes: Vec<BString> = case
1163
+ .values
1164
+ .iter()
1165
+ .map(|&value| TestEntry::new(json, value).as_bstring())
1166
+ .collect();
1167
+
1168
+ let entries: Vec<FileEntry> = input_bytes
1169
+ .iter()
1170
+ .map(|s| RawEntry::from_slice(s).unwrap())
1171
+ .map(|entry| {
1172
+ let meta = EntryMetadata::new(&entry, &info).unwrap();
1173
+ let borrowed =
1174
+ BorrowedData::new(&entry, &info, meta.is_pid_significant()).unwrap();
1175
+ let data = EntryData::try_from(borrowed).unwrap();
1176
+ FileEntry { data, meta }
1177
+ })
1178
+ .collect();
1179
+
1180
+ let mut entry_a = entries[0].clone();
1181
+ let entry_b = entries[1].clone();
1182
+ entry_a.meta.merge(&entry_b.meta);
1183
+
1184
+ assert_eq!(
1185
+ case.expected_value, entry_a.meta.value,
1186
+ "test case: {name} - value"
1187
+ );
1188
+ }
1189
+ }
1190
+ }