prometheus-client-mmap 1.2.1-aarch64-linux-musl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52) hide show
  1. checksums.yaml +7 -0
  2. data/.tool-versions +1 -0
  3. data/README.md +281 -0
  4. data/ext/fast_mmaped_file_rs/Cargo.toml +35 -0
  5. data/ext/fast_mmaped_file_rs/README.md +52 -0
  6. data/ext/fast_mmaped_file_rs/build.rs +5 -0
  7. data/ext/fast_mmaped_file_rs/extconf.rb +28 -0
  8. data/ext/fast_mmaped_file_rs/src/error.rs +174 -0
  9. data/ext/fast_mmaped_file_rs/src/file_entry.rs +784 -0
  10. data/ext/fast_mmaped_file_rs/src/file_info.rs +240 -0
  11. data/ext/fast_mmaped_file_rs/src/lib.rs +78 -0
  12. data/ext/fast_mmaped_file_rs/src/macros.rs +14 -0
  13. data/ext/fast_mmaped_file_rs/src/map.rs +492 -0
  14. data/ext/fast_mmaped_file_rs/src/mmap/inner.rs +704 -0
  15. data/ext/fast_mmaped_file_rs/src/mmap.rs +891 -0
  16. data/ext/fast_mmaped_file_rs/src/raw_entry.rs +473 -0
  17. data/ext/fast_mmaped_file_rs/src/testhelper.rs +222 -0
  18. data/ext/fast_mmaped_file_rs/src/util.rs +121 -0
  19. data/lib/3.1/fast_mmaped_file_rs.so +0 -0
  20. data/lib/3.2/fast_mmaped_file_rs.so +0 -0
  21. data/lib/3.3/fast_mmaped_file_rs.so +0 -0
  22. data/lib/3.4/fast_mmaped_file_rs.so +0 -0
  23. data/lib/prometheus/client/configuration.rb +23 -0
  24. data/lib/prometheus/client/counter.rb +27 -0
  25. data/lib/prometheus/client/formats/text.rb +85 -0
  26. data/lib/prometheus/client/gauge.rb +40 -0
  27. data/lib/prometheus/client/helper/entry_parser.rb +132 -0
  28. data/lib/prometheus/client/helper/file_locker.rb +50 -0
  29. data/lib/prometheus/client/helper/json_parser.rb +23 -0
  30. data/lib/prometheus/client/helper/metrics_processing.rb +45 -0
  31. data/lib/prometheus/client/helper/metrics_representation.rb +51 -0
  32. data/lib/prometheus/client/helper/mmaped_file.rb +64 -0
  33. data/lib/prometheus/client/helper/plain_file.rb +29 -0
  34. data/lib/prometheus/client/histogram.rb +80 -0
  35. data/lib/prometheus/client/label_set_validator.rb +85 -0
  36. data/lib/prometheus/client/metric.rb +80 -0
  37. data/lib/prometheus/client/mmaped_dict.rb +79 -0
  38. data/lib/prometheus/client/mmaped_value.rb +154 -0
  39. data/lib/prometheus/client/page_size.rb +17 -0
  40. data/lib/prometheus/client/push.rb +203 -0
  41. data/lib/prometheus/client/rack/collector.rb +88 -0
  42. data/lib/prometheus/client/rack/exporter.rb +96 -0
  43. data/lib/prometheus/client/registry.rb +65 -0
  44. data/lib/prometheus/client/simple_value.rb +31 -0
  45. data/lib/prometheus/client/summary.rb +69 -0
  46. data/lib/prometheus/client/support/puma.rb +44 -0
  47. data/lib/prometheus/client/support/unicorn.rb +35 -0
  48. data/lib/prometheus/client/uses_value_type.rb +20 -0
  49. data/lib/prometheus/client/version.rb +5 -0
  50. data/lib/prometheus/client.rb +58 -0
  51. data/lib/prometheus.rb +3 -0
  52. metadata +249 -0
@@ -0,0 +1,121 @@
1
+ use nix::errno::Errno;
2
+ use nix::libc::c_long;
3
+ use std::fmt::Display;
4
+ use std::io;
5
+ use std::mem::size_of;
6
+
7
+ use crate::error::MmapError;
8
+ use crate::Result;
9
+
10
+ /// Wrapper around `checked_add()` that converts failures
11
+ /// to `MmapError::Overflow`.
12
+ pub trait CheckedOps: Sized {
13
+ fn add_chk(self, rhs: Self) -> Result<Self>;
14
+ fn mul_chk(self, rhs: Self) -> Result<Self>;
15
+ }
16
+
17
+ impl CheckedOps for usize {
18
+ fn add_chk(self, rhs: Self) -> Result<Self> {
19
+ self.checked_add(rhs)
20
+ .ok_or_else(|| MmapError::overflowed(self, rhs, "adding"))
21
+ }
22
+
23
+ fn mul_chk(self, rhs: Self) -> Result<Self> {
24
+ self.checked_mul(rhs)
25
+ .ok_or_else(|| MmapError::overflowed(self, rhs, "multiplying"))
26
+ }
27
+ }
28
+
29
+ impl CheckedOps for c_long {
30
+ fn add_chk(self, rhs: Self) -> Result<Self> {
31
+ self.checked_add(rhs)
32
+ .ok_or_else(|| MmapError::overflowed(self, rhs, "adding"))
33
+ }
34
+
35
+ fn mul_chk(self, rhs: Self) -> Result<Self> {
36
+ self.checked_mul(rhs)
37
+ .ok_or_else(|| MmapError::overflowed(self, rhs, "multiplying"))
38
+ }
39
+ }
40
+
41
+ /// A wrapper around `TryFrom`, returning `MmapError::FailedCast` on error.
42
+ pub fn cast_chk<T, U>(val: T, name: &str) -> Result<U>
43
+ where
44
+ T: Copy + Display,
45
+ U: std::convert::TryFrom<T>,
46
+ {
47
+ U::try_from(val).map_err(|_| MmapError::failed_cast::<T, U>(val, name))
48
+ }
49
+
50
/// Retrieve errno(3) for the most recent OS error.
pub fn errno() -> i32 {
    let err = io::Error::last_os_error();
    // EXPECT: `last_os_error()` is built from a raw OS error, so
    // `raw_os_error()` always returns `Some` here.
    err.raw_os_error()
        .expect("last_os_error() carried no raw errno")
}
55
+
56
+ /// Get the error string associated with errno(3).
57
+ /// Equivalent to strerror(3).
58
+ pub fn strerror(errno: i32) -> &'static str {
59
+ Errno::from_i32(errno).desc()
60
+ }
61
+
62
+ /// Read a `u32` value from a byte slice starting from `offset`.
63
+ #[inline]
64
+ pub fn read_u32(buf: &[u8], offset: usize) -> Result<u32> {
65
+ if let Some(slice) = buf.get(offset..offset + size_of::<u32>()) {
66
+ // UNWRAP: We can safely unwrap the conversion from slice to array as we
67
+ // the source and targets are constructed here with the same length.
68
+ let out: &[u8; size_of::<u32>()] = slice.try_into().unwrap();
69
+
70
+ return Ok(u32::from_ne_bytes(*out));
71
+ }
72
+ Err(MmapError::out_of_bounds(offset, buf.len()))
73
+ }
74
+
75
+ /// Read an `f64` value from a byte slice starting from `offset`.
76
+ #[inline]
77
+ pub fn read_f64(buf: &[u8], offset: usize) -> Result<f64> {
78
+ if let Some(slice) = buf.get(offset..offset + size_of::<f64>()) {
79
+ // UNWRAP: We can safely unwrap the conversion from slice to array as we
80
+ // can be sure the target array has same length as the source slice.
81
+ let out: &[u8; size_of::<f64>()] = slice.try_into().unwrap();
82
+
83
+ return Ok(f64::from_ne_bytes(*out));
84
+ }
85
+ Err(MmapError::out_of_bounds(
86
+ offset + size_of::<f64>(),
87
+ buf.len(),
88
+ ))
89
+ }
90
+
91
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_read_u32() {
        let buf = 1u32.to_ne_bytes();

        assert_eq!(read_u32(&buf, 0).unwrap(), 1, "index ok");
        assert!(read_u32(&buf, 10).is_err(), "index out of range");
        assert!(
            read_u32(&buf, 1).is_err(),
            "index in range but end out of range"
        );
    }

    #[test]
    fn test_read_f64() {
        let buf = 1.00f64.to_ne_bytes();

        let parsed = read_f64(&buf, 0);
        assert!(matches!(parsed, Ok(v) if v == 1.00));

        assert!(read_f64(&buf, 10).is_err(), "index out of range");
        assert!(
            read_f64(&buf, 1).is_err(),
            "index in range but end out of range"
        );
    }
}
Binary file
Binary file
Binary file
Binary file
@@ -0,0 +1,23 @@
1
require 'prometheus/client/registry'
require 'prometheus/client/mmaped_value'
require 'prometheus/client/page_size'
require 'logger'
require 'tmpdir'

module Prometheus
  module Client
    # Client-wide settings: the value implementation, where the
    # multiprocess metric files live, their initial size, logging, and how
    # the current process id is determined.
    class Configuration
      attr_accessor :value_class, :multiprocess_files_dir, :initial_mmap_file_size, :logger, :pid_provider

      def initialize
        @value_class = ::Prometheus::Client::MmapedValue
        # Size new mmap files to one memory page, falling back to 4096
        # bytes when the page size cannot be determined.
        @initial_mmap_file_size = ::Prometheus::Client::PageSize.page_size(fallback_page_size: 4096)
        @logger = Logger.new($stdout)
        @pid_provider = Process.method(:pid)
        # Honor the conventional env var; otherwise use a throwaway tmpdir.
        @multiprocess_files_dir = ENV.fetch('prometheus_multiproc_dir') { Dir.mktmpdir("prometheus-mmap") }
      end
    end
  end
end
@@ -0,0 +1,27 @@
1
# encoding: UTF-8

require 'prometheus/client/metric'

module Prometheus
  module Client
    # Counter is a metric that exposes merely a sum or tally of things.
    class Counter < Metric
      def type
        :counter
      end

      # Add `by` (default 1) to the value tracked for `labels`.
      # Counters may only go up, so negative increments are rejected.
      def increment(labels = {}, by = 1)
        raise ArgumentError, 'increment must be a non-negative number' if by < 0

        key = label_set_for(labels)
        synchronize do
          @values[key].increment(by)
        end
      end

      private

      # Backing value object created for a previously unseen label set.
      def default(labels)
        value_object(type, @name, @name, labels)
      end
    end
  end
end
@@ -0,0 +1,85 @@
1
require 'prometheus/client/uses_value_type'
require 'prometheus/client/helper/json_parser'
require 'prometheus/client/helper/plain_file'
require 'prometheus/client/helper/metrics_processing'
require 'prometheus/client/helper/metrics_representation'

module Prometheus
  module Client
    module Formats
      # Text format is human readable mainly used for manual inspection.
      module Text
        MEDIA_TYPE = 'text/plain'.freeze
        VERSION = '0.0.4'.freeze
        CONTENT_TYPE = "#{MEDIA_TYPE}; version=#{VERSION}".freeze

        class << self
          # Serialize every metric in `registry` into the text exposition
          # format.
          def marshal(registry)
            metrics = registry.metrics.map do |metric|
              samples = metric.values.flat_map do |label_set, value|
                representation(metric, label_set, value)
              end

              [metric.name, { type: metric.type, help: metric.docstring, samples: samples }]
            end

            Helper::MetricsRepresentation.to_text(metrics)
          end

          # Merge all per-process *.db files under `path` and render them,
          # delegating parsing and aggregation to the native extension.
          def marshal_multiprocess(path = Prometheus::Client.configuration.multiprocess_files_dir)
            db_files = Dir.glob(File.join(path, '*.db')).sort
            file_list = db_files
                        .map { |f| Helper::PlainFile.new(f) }
                        .map { |f| [f.filepath, f.multiprocess_mode.to_sym, f.type.to_sym, f.pid] }

            FastMmapedFileRs.to_metrics(file_list.to_a)
          end

          private

          # Parse every *.db file under `path` into a metrics hash.
          def load_metrics(path)
            Dir.glob(File.join(path, '*.db')).sort.each_with_object({}) do |f, metrics|
              Helper::PlainFile.new(f).to_metrics(metrics)
            end
          end

          # One or more [name, labels, value] rows for a single sample.
          def representation(metric, label_set, value)
            labels = metric.base_labels.merge(label_set)

            case metric.type
            when :summary
              summary(metric.name, labels, value)
            when :histogram
              histogram(metric.name, labels, value)
            else
              [[metric.name, labels, value.get]]
            end
          end

          # Quantile rows plus _sum and _count for a summary.
          def summary(name, set, value)
            rows = value.get.map do |q, v|
              [name, set.merge(quantile: q), v]
            end

            rows << ["#{name}_sum", set, value.get.sum]
            rows << ["#{name}_count", set, value.get.total]
            rows
          end

          # Bucket rows (plus the +Inf bucket), _sum and _count for a
          # histogram.
          def histogram(name, set, value)
            rows = value.get.map do |q, v|
              [name, set.merge(le: q), v]
            end

            rows << [name, set.merge(le: '+Inf'), value.get.total]
            rows << ["#{name}_sum", set, value.get.sum]
            rows << ["#{name}_count", set, value.get.total]
            rows
          end
        end
      end
    end
  end
end
@@ -0,0 +1,40 @@
1
# encoding: UTF-8

require 'prometheus/client/metric'

module Prometheus
  module Client
    # A Gauge is a metric that exposes merely an instantaneous value or some
    # snapshot thereof.
    class Gauge < Metric
      # Aggregation modes accepted for multiprocess gauges.
      VALID_MULTIPROCESS_MODES = [:min, :max, :livesum, :liveall, :all].freeze

      def initialize(name, docstring, base_labels = {}, multiprocess_mode = :all)
        super(name, docstring, base_labels)
        if value_class.multiprocess && !VALID_MULTIPROCESS_MODES.include?(multiprocess_mode)
          # Interpolate rather than concatenate: the mode is a Symbol, and
          # String#+ would raise TypeError here, masking the intended
          # ArgumentError.
          raise ArgumentError, "Invalid multiprocess mode: #{multiprocess_mode}"
        end
        @multiprocess_mode = multiprocess_mode
      end

      def type
        :gauge
      end

      # Backing value object; gauges also carry their multiprocess
      # aggregation mode.
      def default(labels)
        value_object(type, @name, @name, labels, @multiprocess_mode)
      end

      # Sets the value for the given label set.
      def set(labels, value)
        @values[label_set_for(labels)].set(value)
      end

      # Increments the gauge for the given label set by `value`.
      def increment(labels, value)
        @values[label_set_for(labels)].increment(value)
      end

      # Decrements the gauge for the given label set by `value`.
      def decrement(labels, value)
        @values[label_set_for(labels)].decrement(value)
      end
    end
  end
end
@@ -0,0 +1,132 @@
1
require 'prometheus/client/helper/json_parser'

module Prometheus
  module Client
    module Helper
      # Parses the binary on-disk format of mmap'd metric files. Intended
      # to be included into a class that provides `filepath`, `size`, and
      # `slice` (byte-range access to the raw file contents).
      module EntryParser
        class ParsingError < RuntimeError
        end

        MINIMUM_SIZE = 8
        START_POSITION = 8
        VALUE_BYTES = 8
        ENCODED_LENGTH_BYTES = 4

        # Number of payload bytes in use, from the signed 32-bit header.
        def used
          slice(0..3).unpack('l')[0]
        end

        # File-name components (type, multiprocess mode, pid parts), with
        # any trailing "-<number>" sequence suffix removed.
        def parts
          @parts ||= File.basename(filepath, '.db')
                         .split('_')
                         .map { |e| e.gsub(/-\d+$/, '') } # remove trailing -number
        end

        # Metric type encoded in the file name (e.g. :counter, :gauge).
        def type
          parts[0].to_sym
        end

        # Process id portion of the file name (may contain underscores).
        def pid
          (parts[2..-1] || []).join('_')
        end

        # Multiprocess aggregation mode encoded in the file name.
        def multiprocess_mode
          parts[1]
        end

        # True when the file is too small for a header or holds no data.
        def empty?
          size < MINIMUM_SIZE || used.zero?
        end

        # Lazily yields [raw_data, encoded_len, value_offset, pos] for each
        # entry. With `ignore_errors` truthy, malformed regions are skipped
        # instead of raising ParsingError.
        def entries(ignore_errors = false)
          return Enumerator.new {} if empty?

          Enumerator.new do |yielder|
            used_ = used # cache used to avoid unnecessary unpack operations

            pos = START_POSITION # used + padding offset
            while pos < used_ && pos < size && pos > 0
              data = slice(pos..-1)
              unless data
                raise ParsingError, "data slice is nil at pos #{pos}" unless ignore_errors
                pos += 8
                next
              end

              encoded_len, first_encoded_bytes = data.unpack('LL')
              if encoded_len.nil? || encoded_len.zero? || first_encoded_bytes.nil? || first_encoded_bytes.zero?
                # do not parse empty data
                pos += 8
                next
              end

              entry_len = ENCODED_LENGTH_BYTES + encoded_len
              padding_len = 8 - entry_len % 8

              value_offset = entry_len + padding_len # align to 8 bytes
              pos += value_offset

              if value_offset > 0 && (pos + VALUE_BYTES) <= size # if positions are safe
                yielder.yield data, encoded_len, value_offset, pos
              else
                raise ParsingError, "data slice is nil at pos #{pos}" unless ignore_errors
              end
              pos += VALUE_BYTES
            end
          end
        end

        # Decode each entry into a [json_key_string, float_value] pair.
        def parsed_entries(ignore_errors = false)
          result = entries(ignore_errors).map do |data, encoded_len, value_offset, _|
            begin
              encoded, value = data.unpack(format('@4A%d@%dd', encoded_len, value_offset))
              [encoded, value]
            rescue ArgumentError => e
              Prometheus::Client.logger.debug("Error processing data: #{bin_to_hex(data[0, 7])} len: #{encoded_len} value_offset: #{value_offset}")
              raise ParsingError, e unless ignore_errors
            end
          end

          result.reject!(&:nil?) if ignore_errors
          result
        end

        # Accumulate this file's samples into `metrics`, keyed by metric
        # name. Entry keys are JSON arrays of
        # [metric_name, sample_name, label_names, label_values].
        def to_metrics(metrics = {}, ignore_errors = false)
          parsed_entries(ignore_errors).each do |key, value|
            begin
              metric_name, name, labelnames, labelvalues = JsonParser.load(key)
              labelnames ||= []
              labelvalues ||= []

              metric = metrics.fetch(metric_name,
                                     metric_name: metric_name,
                                     help: 'Multiprocess metric',
                                     type: type,
                                     samples: [])
              if type == :gauge
                metric[:multiprocess_mode] = multiprocess_mode
                metric[:samples] += [[name, labelnames.zip(labelvalues) + [['pid', pid]], value]]
              else
                # The duplicates and labels are fixed in the next for.
                metric[:samples] += [[name, labelnames.zip(labelvalues), value]]
              end
              metrics[metric_name] = metric

            rescue JSON::ParserError => e
              # Previously `raise ParsingError(e)`: a call to an undefined
              # method, which produced NoMethodError instead of the
              # intended ParsingError.
              raise ParsingError, e.message unless ignore_errors
            end
          end

          # Test the value, not the [key, value] pair (the pair is never
          # nil, so the old one-arg block was a silent no-op).
          metrics.reject! { |_key, metric| metric.nil? } if ignore_errors
          metrics
        end

        private

        # Hex dump used in debug logging of malformed entries.
        def bin_to_hex(s)
          s.each_byte.map { |b| b.to_s(16) }.join
        end
      end
    end
  end
end
@@ -0,0 +1,50 @@
1
module Prometheus
  module Client
    module Helper
      # Tracks advisory file locks (flock) held by this process so that
      # each metrics file is claimed by at most one writer at a time.
      class FileLocker
        class << self
          LOCK_FILE_MUTEX = Mutex.new

          # Try to take an exclusive, non-blocking flock on `filepath`.
          # Returns true when the lock is acquired; false when this process
          # already holds it or another process does.
          def lock_to_process(filepath)
            LOCK_FILE_MUTEX.synchronize do
              @file_locks ||= {}
              return false if @file_locks[filepath]

              handle = File.open(filepath, 'ab')
              if handle.flock(File::LOCK_NB | File::LOCK_EX)
                @file_locks[filepath] = handle
                true
              else
                false
              end
            end
          end

          # Release the lock on `filepath` if this process holds it.
          def unlock(filepath)
            LOCK_FILE_MUTEX.synchronize do
              @file_locks ||= {}
              handle = @file_locks[filepath]
              return false unless handle

              handle.flock(File::LOCK_UN)
              handle.close
              @file_locks.delete(filepath)
            end
          end

          # Release every lock this process holds.
          def unlock_all
            LOCK_FILE_MUTEX.synchronize do
              @file_locks ||= {}
              @file_locks.each_value do |handle|
                handle.flock(File::LOCK_UN)
                handle.close
              end

              @file_locks = {}
            end
          end
        end
      end
    end
  end
end
@@ -0,0 +1,23 @@
1
require 'json'

module Prometheus
  module Client
    module Helper
      # Parses JSON with Oj when it is loaded, falling back to the stdlib
      # parser otherwise. Oj failures are re-raised as JSON::ParserError so
      # callers only ever need to rescue one class.
      module JsonParser
        class << self
          if defined?(Oj)
            # Oj is present: use it, translating its errors.
            def load(s)
              Oj.load(s)
            rescue Oj::ParseError, EncodingError => e
              raise JSON::ParserError, e.message
            end
          else
            # Pure-Ruby fallback via the stdlib.
            def load(s)
              JSON.parse(s)
            end
          end
        end
      end
    end
  end
end
@@ -0,0 +1,45 @@
1
module Prometheus
  module Client
    module Helper
      # Combines samples reported by multiple processes into the final
      # per-label-set values exposed to Prometheus.
      module MetricsProcessing
        # Replace each metric's raw samples with merged
        # [name, labels_hash, value] rows.
        def self.merge_metrics(metrics)
          metrics.each_value do |metric|
            merged = merge_samples(metric[:samples], metric[:type], metric[:multiprocess_mode])
            metric[:samples] = merged.map { |(name, labels), value| [name, labels.to_h, value] }
          end
        end

        # Reduce raw per-process samples to one value per label set.
        #
        # Gauges honour the configured multiprocess mode (min/max/livesum
        # aggregate across pids; all/liveall keep one sample per pid).
        # Every other metric type is summed across processes.
        def self.merge_samples(raw_samples, metric_type, multiprocess_mode)
          raw_samples.each_with_object({}) do |(name, labels, value), samples|
            without_pid = labels.reject { |l| l[0] == 'pid' }
            key = [name, without_pid]

            if metric_type == :gauge
              case multiprocess_mode
              when 'min'
                samples[key] = [samples.fetch(key, value), value].min
              when 'max'
                samples[key] = [samples.fetch(key, value), value].max
              when 'livesum'
                samples[key] = samples.fetch(key, 0.0) + value
              else # all/liveall: keep the pid label, one sample per process
                samples[[name, labels]] = value
              end
            else
              # Counter, Histogram and Summary.
              samples[key] = samples.fetch(key, 0.0) + value
            end
          end
        end
      end
    end
  end
end
@@ -0,0 +1,51 @@
1
module Prometheus
  module Client
    module Helper
      # Renders merged metrics into the Prometheus text exposition format.
      module MetricsRepresentation
        METRIC_LINE = '%s%s %s'.freeze
        TYPE_LINE = '# TYPE %s %s'.freeze
        HELP_LINE = '# HELP %s %s'.freeze

        LABEL = '%s="%s"'.freeze
        SEPARATOR = ','.freeze
        DELIMITER = "\n".freeze

        REGEX = { doc: /[\n\\]/, label: /[\n\\"]/ }.freeze
        REPLACE = { "\n" => '\n', '\\' => '\\\\', '"' => '\"' }.freeze

        # Render HELP/TYPE headers and one line per sample.
        def self.to_text(metrics)
          lines = metrics.each_with_object([]) do |(name, metric), acc|
            acc << format(HELP_LINE, name, escape(metric[:help]))
            acc << format(TYPE_LINE, name, metric[:type])
            metric[:samples].each do |sample_name, labels, value|
              acc << metric(sample_name, format_labels(labels), value)
            end
          end

          # there must be a trailing delimiter, hence the final nil entry
          (lines << nil).join(DELIMITER)
        end

        # A single sample line.
        def self.metric(name, labels, value)
          format(METRIC_LINE, name, labels, value)
        end

        # `{k="v",…}` for a non-empty label set, nil otherwise.
        def self.format_labels(set)
          return if set.empty?

          rendered = set.map { |key, value| format(LABEL, key, escape(value, :label)) }
          "{#{rendered.join(SEPARATOR)}}"
        end

        # Escape backslashes/newlines (and quotes in label values).
        def self.escape(string, format = :doc)
          string.to_s.gsub(REGEX[format], REPLACE)
        end
      end
    end
  end
end
@@ -0,0 +1,64 @@
1
require 'prometheus/client/helper/entry_parser'
require 'prometheus/client/helper/file_locker'

# load precompiled extension if available
begin
  ruby_version = /(\d+\.\d+)/.match(RUBY_VERSION)
  require_relative "../../../#{ruby_version}/fast_mmaped_file_rs"
rescue LoadError
  require 'fast_mmaped_file_rs'
end

module Prometheus
  module Client
    module Helper
      # Ruby-side wrapper around the native mmap'd metrics file. Grows a
      # brand-new file to its initial size before mapping it, and pairs
      # each file with a process-exclusive lock.
      class MmapedFile < FastMmapedFileRs
        include EntryParser

        attr_reader :filepath, :size

        def initialize(filepath)
          @filepath = filepath

          File.open(filepath, 'a+b') do |file|
            # Grow new (or truncated) files before mmap'ing them.
            file.truncate(initial_mmap_file_size) if file.size < MINIMUM_SIZE
            @size = file.size
          end

          super(filepath)
        end

        # Unmap the file and release this process's lock on it.
        def close
          munmap
          FileLocker.unlock(filepath)
        end

        private

        def initial_mmap_file_size
          Prometheus::Client.configuration.initial_mmap_file_size
        end

        public

        class << self
          def open(filepath)
            MmapedFile.new(filepath)
          end

          # Find a metrics-file path not yet locked by any process and lock
          # it to this one. File names carry the pid plus a sequence
          # number, so the first unlocked candidate wins.
          def ensure_exclusive_file(file_prefix = 'mmaped_file')
            (0..Float::INFINITY).lazy
              .map { |f_num| "#{file_prefix}_#{Prometheus::Client.pid}-#{f_num}.db" }
              .map { |filename| File.join(Prometheus::Client.configuration.multiprocess_files_dir, filename) }
              .find { |path| Helper::FileLocker.lock_to_process(path) }
          end

          # Lock and open a fresh process-exclusive metrics file.
          def open_exclusive_file(file_prefix = 'mmaped_file')
            open(Helper::MmapedFile.ensure_exclusive_file(file_prefix))
          end
        end
      end
    end
  end
end