prometheus-client-mmap 0.7.0.beta12 → 0.7.0.beta13

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: d37ca4308c9dbdb10a22066b37df88b7e456cfb3
- data.tar.gz: a27b2e124d343da9bb5b8f639894cd84f846334a
+ metadata.gz: 6fcba3cc4377e6b167f11073280a999e4df75717
+ data.tar.gz: 9635faeeb5c2fb643176954f417acc738ee60756
  SHA512:
- metadata.gz: 8fba7137a3ea1f5fa1ea375bb1999d85b1765a6c73adb69ef77a0f6f2d77db4f1ac057a37de600baf8b6abe355aaac97ee4db88e4ccf82aa81b4b14f4d845df9
- data.tar.gz: e785931142ce33b5485b6dcb6742adaedc238235fd1c92e5117cfd3d483945912b9a721719a91ad6b013f4a8038dcb20c1a10e3bfe5fb47fff20115168055f9f
+ metadata.gz: 85ebce9a92214fb77be47134d3282a5a71661e9c0c8d2140f6aeed8abc00bf6780541ca33cb67f2394047137740f27098725406668eda36d4b1091ae4fbec126
+ data.tar.gz: 259e6ea42822d509900da3c97602774e4b69cf1384439202d53924fc12d16d7c814298adeeeefd7a553027f66b028601fe9be16701da3680f4fb14c7cb364932
data/README.md CHANGED
@@ -164,6 +164,27 @@ summary.get({ service: 'database' })
  # => { 0.5 => 0.1233122, 0.9 => 3.4323, 0.99 => 5.3428231 }
  ```

+ ## Pitfalls
+
+ ### PID cardinality
+
+ In a multiprocess setup, e.g. when running under Unicorn, frequent worker process restarts can
+ lead to performance problems when processing metric files. By default, each process using
+ Prometheus metrics will create a set of files based on that process's PID. With high worker
+ churn this will lead to the creation of thousands of files, which in turn will cause a very
+ noticeable slowdown when displaying metrics.
+
+ To reduce this problem, a surrogate process id can be used. The set of all such IDs needs to
+ have low cardinality, and each process id must be unique among all running processes.
+
+ For Unicorn, a worker id/number can be used to greatly speed up metrics rendering.
+
+ To use it, add this line to your `configure` block:
+
+ ```ruby
+ config.pid_provider = Prometheus::Client::Support::Unicorn.method(:worker_pid_provider)
+ ```
+
  ## Tests

  Install necessary development gems with `bundle install` and run tests with
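The `pid_provider` line added to the README above would normally sit inside a fuller configure block, e.g. in an application initializer. A sketch only; the initializer path and the `Prometheus::Client.configure` block form are assumptions based on the README wording, not something shown in this diff:

```ruby
# config/initializers/prometheus.rb — illustrative only; path and block form are assumptions.
require 'prometheus/client'
require 'prometheus/client/support/unicorn'

Prometheus::Client.configure do |config|
  # Directory where per-process metric files are written.
  config.multiprocess_files_dir = '/tmp/prometheus_multiproc'

  # Use the Unicorn worker number instead of the OS PID so that worker
  # restarts do not keep creating new metric files.
  config.pid_provider = Prometheus::Client::Support::Unicorn.method(:worker_pid_provider)
end
```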
lib/prometheus/client.rb CHANGED
@@ -25,6 +25,10 @@ module Prometheus
  configuration.logger
  end

+ def pid
+ configuration.pid_provider.call
+ end
+
  def reinitialize_on_pid_change
  ::Prometheus::Client::MmapedValue.reinitialize_on_pid_change
  end
lib/prometheus/client/configuration.rb CHANGED
@@ -5,13 +5,14 @@ require 'logger'
  module Prometheus
  module Client
  class Configuration
- attr_accessor :value_class, :multiprocess_files_dir, :initial_mmap_file_size, :logger
+ attr_accessor :value_class, :multiprocess_files_dir, :initial_mmap_file_size, :logger, :pid_provider

  def initialize
  @value_class = ::Prometheus::Client::MmapedValue
  @multiprocess_files_dir = ENV['prometheus_multiproc_dir']
  @initial_mmap_file_size = 4 * 1024
  @logger = Logger.new($stdout)
+ @pid_provider = Process.method(:pid)
  end
  end
  end
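The new `pid_provider` defaults to `Process.method(:pid)`; the `Prometheus::Client.pid` helper added above simply calls it, and `MmapedValue` (further down in this diff) uses that value instead of the raw OS PID. Any callable returning a stable, low-cardinality identifier will do. A minimal sketch, assuming the module-level `configuration` accessor used in the hunks above is public; the `WORKER_INDEX` variable is hypothetical:

```ruby
# Sketch: any callable can serve as a pid_provider; WORKER_INDEX is a made-up env var.
Prometheus::Client.configuration.pid_provider =
  -> { "worker_#{ENV.fetch('WORKER_INDEX', Process.pid)}" }

Prometheus::Client.pid # => e.g. "worker_0"
```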
lib/prometheus/client/formats/text.rb CHANGED
@@ -1,4 +1,5 @@
  require 'prometheus/client/uses_value_type'
+ require 'prometheus/client/helper/json_parser'

  module Prometheus
  module Client
@@ -96,29 +97,24 @@ module Prometheus
  Dir.glob(File.join(path, '*.db')).sort.each do |f|
  parts = File.basename(f, '.db').split('_')
  type = parts[0].to_sym
- d = MmapedDict.new(f)
-
- begin
- d.all_values.each do |key, value|
- metric_name, name, labelnames, labelvalues = JSON.parse(key)
- metric = metrics.fetch(metric_name,
- metric_name: metric_name,
- help: 'Multiprocess metric',
- type: type,
- samples: []
- )
- if type == :gauge
- pid = parts[2]
- metric[:multiprocess_mode] = parts[1]
- metric[:samples] += [[name, labelnames.zip(labelvalues) + [['pid', pid]], value]]
- else
- # The duplicates and labels are fixed in the next for.
- metric[:samples] += [[name, labelnames.zip(labelvalues), value]]
- end
- metrics[metric_name] = metric
+
+ MmapedDict.read_all_values(f).each do |key, value|
+ metric_name, name, labelnames, labelvalues = Helper::JsonParser.load(key)
+ metric = metrics.fetch(metric_name,
+ metric_name: metric_name,
+ help: 'Multiprocess metric',
+ type: type,
+ samples: []
+ )
+ if type == :gauge
+ pid = parts[2]
+ metric[:multiprocess_mode] = parts[1]
+ metric[:samples] += [[name, labelnames.zip(labelvalues) + [['pid', pid]], value]]
+ else
+ # The duplicates and labels are fixed in the next for.
+ metric[:samples] += [[name, labelnames.zip(labelvalues), value]]
  end
- ensure
- d.close
+ metrics[metric_name] = metric
  end
  end
  metrics
  metrics
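Each key stored in a `.db` file is a JSON-encoded tuple, which the loop above destructures into metric name, sample name, label names, and label values. A small sketch with made-up metric and label names:

```ruby
require 'json'

# Hypothetical key, shaped like the ones Helper::JsonParser.load parses above.
key = '["http_requests_total","http_requests_total",["code","method"],["200","get"]]'
metric_name, name, labelnames, labelvalues = JSON.parse(key)

labelnames.zip(labelvalues)
# => [["code", "200"], ["method", "get"]]
```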
lib/prometheus/client/helper/json_parser.rb ADDED
@@ -0,0 +1,19 @@
+ module Prometheus
+ module Client
+ module Helper
+ module JsonParser
+ class << self
+ if defined?(Oj)
+ def load(s)
+ Oj.load(s)
+ end
+ else
+ def load(s)
+ JSON.parse(s)
+ end
+ end
+ end
+ end
+ end
+ end
+ end
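Because the `if defined?(Oj)` check runs once, when this helper file is first required, the Oj-backed `load` is only picked up if Oj is already loaded at that point. A sketch of the intended load order; adding Oj is optional and it is not declared as a dependency of this gem:

```ruby
# Sketch: to opt into Oj-backed parsing, make sure Oj is loaded first.
require 'oj'                # optional third-party parser; not a dependency of this gem
require 'prometheus/client' # defined?(Oj) is true by the time the helper above is required
```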
lib/prometheus/client/mmaped_dict.rb CHANGED
@@ -3,7 +3,8 @@ require 'mmap'

  module Prometheus
  module Client
- class ParsingError < StandardError; end
+ class ParsingError < StandardError
+ end

  # A dict of doubles, backed by an mmapped file.
  #
@@ -23,30 +24,45 @@ module Prometheus
  @f = File.open(filename, 'a+b')
  process_file
  rescue StandardError => e
- raise ParsingError.new("exception #{e} while processing metrics file #{@f.path}")
+ raise ParsingError, "exception #{e} while processing metrics file #{@f.path}"
  end

- # Yield (key, value, pos). No locking is performed.
- def all_values
- read_all_values.map { |k, v, p| [k, v] }
+ # Yield (key, value). No locking is performed.
+ def self.read_all_values(f)
+ m = Mmap.new(f, 'rw', Mmap::MAP_SHARED)
+ used, = m[0..3].unpack('l')
+ pos = 8
+ values = []
+ while pos < used
+ data = m.slice(pos..-1)
+ encoded_len, = data.unpack('l')
+ value_offset = 4 + encoded_len + (8 - (encoded_len + 4) % 8)
+
+ encoded, value = data.unpack(format('@4A%d@%dd', encoded_len, value_offset))
+ values << [encoded, value]
+ pos += value_offset + 8
+ end
+ values
+ ensure
+ m.munmap
  end

  def read_value(key)
  @mutex.synchronize do
- init_value(key) unless @positions.has_key?(key)
+ init_value(key) unless @positions.key?(key)
  end
  pos = @positions[key]
  # We assume that reading from an 8 byte aligned value is atomic.
- @m[pos..pos+7].unpack('d')[0]
+ @m[pos..pos + 7].unpack('d')[0]
  end

  def write_value(key, value)
  @mutex.synchronize do
- init_value(key) unless @positions.has_key?(key)
+ init_value(key) unless @positions.key?(key)
  end
  pos = @positions[key]
  # We assume that writing to an 8 byte aligned value is atomic.
- @m[pos..pos+7] = [value].pack('d')
+ @m[pos..pos + 7] = [value].pack('d')
  end

  def close
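The new class-level `read_all_values` parses the file layout visible in the code above: a 4-byte `used` counter at offset 0, entries starting at offset 8, and for each entry a 4-byte key length, the key bytes, alignment padding, and an 8-byte double. A sketch of packing one such entry in plain Ruby, illustrative only and not code from the gem (the padding byte is an assumption):

```ruby
# Illustrative packing of a single entry, mirroring the offsets parsed above.
key = '["metric","metric",["label"],["value"]]'
padding = 8 - (key.bytesize + 4) % 8           # pad so the following double stays 8-byte aligned
entry = [key.bytesize].pack('l') +             # 4-byte native-endian key length
        key + ("\0" * padding) +               # key bytes plus alignment padding (assumed NUL)
        [1.5].pack('d')                        # 8-byte double value
entry.bytesize % 8 # => 0
```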
@@ -64,9 +80,7 @@ module Prometheus
  private

  def process_file
- if @f.size < MINIMUM_SIZE
- @f.truncate(initial_mmap_file_size)
- end
+ @f.truncate(initial_mmap_file_size) if @f.size < MINIMUM_SIZE

  @capacity = @f.size
  @m = Mmap.new(@f.path, 'rw', Mmap::MAP_SHARED)
@@ -78,7 +92,7 @@ module Prometheus
  @used = 8
  @m[0..3] = [@used].pack('l')
  else
- read_all_values.each do |key, _, pos|
+ read_all_positions.each do |key, _, pos|
  @positions[key] = pos
  end
  end
@@ -104,21 +118,20 @@ module Prometheus
  end

  # Yield (key, value, pos). No locking is performed.
- def read_all_values
+ def read_all_positions
  pos = 8
  values = []
  while pos < @used
- encoded_len = @m[pos..-1].unpack('l')[0]
- pos += 4
- encoded = @m[pos..-1].unpack("A#{encoded_len}")[0]
- padded_len = encoded_len + (8 - (encoded_len + 4) % 8)
- pos += padded_len
- value = @m[pos..-1].unpack('d')[0]
- values << [encoded, value, pos]
+ data = @m.slice(pos..-1)
+ encoded_len = data.unpack('l')[0]
+ padding_len = 8 - (encoded_len + 4) % 8
+ encoded = data.unpack(format('@4A%d', encoded_len))
+ pos += 4 + encoded_len + padding_len
+ values << [encoded, pos]
  pos += 8
  end
  values
  end
  end
  end
- end
+ end
lib/prometheus/client/mmaped_value.rb CHANGED
@@ -1,3 +1,4 @@
+ require 'prometheus/client'
  require 'prometheus/client/mmaped_dict'
  require 'json'
 
@@ -7,10 +8,10 @@ module Prometheus
  class MmapedValue
  @@files = {}
  @@files_lock = Mutex.new
- @@pid = Process.pid
+ @@pid = -1

  def initialize(type, metric_name, name, labels, multiprocess_mode = '')
- @pid = Process.pid
+ @pid = Prometheus::Client.pid
  @file_prefix = type.to_s
  @metric_name = metric_name
  @name = name
@@ -50,7 +51,7 @@ module Prometheus
  def self.reset_on_pid_change
  @@files_lock.synchronize do
  if pid_changed?
- @@pid = Process.pid
+ @@pid = Prometheus::Client.pid
  @@files = {}
  end
  end
@@ -62,7 +63,7 @@ module Prometheus
  end

  def self.pid_changed?
- @@pid != Process.pid
+ @@pid != Prometheus::Client.pid
  end

  def self.multiprocess
@@ -70,8 +71,8 @@ module Prometheus
  end

  def reinitialize
- if @pid != Process.pid
- @pid = Process.pid
+ if @pid != Prometheus::Client.pid
+ @pid = Prometheus::Client.pid
  initialize_file
  end
  end
lib/prometheus/client/support/unicorn.rb ADDED
@@ -0,0 +1,24 @@
+ module Prometheus
+ module Client
+ module Support
+ module Unicorn
+ def self.worker_pid_provider
+ wid = worker_id
+ Process.pid if wid.nil?
+
+ "worker_id_#{wid}"
+ end
+
+ def self.worker_id
+ return unless defined?(::Unicorn::Worker)
+
+ workers = ObjectSpace.each_object(::Unicorn::Worker)
+ return if workers.nil?
+
+ workers_first = workers.first
+ workers_first.nr unless workers_first.nil?
+ end
+ end
+ end
+ end
+ end
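Under a running Unicorn worker, `worker_id` locates a `::Unicorn::Worker` instance via `ObjectSpace` and returns its `nr`, so the provider yields a small, stable set of identifiers instead of churning PIDs. Roughly, as a sketch assuming the located worker's number is 0:

```ruby
# Sketch, inside a Unicorn worker whose Unicorn::Worker#nr is 0:
Prometheus::Client::Support::Unicorn.worker_id           # => 0
Prometheus::Client::Support::Unicorn.worker_pid_provider # => "worker_id_0"
```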
lib/prometheus/client/version.rb CHANGED
@@ -2,6 +2,6 @@

  module Prometheus
  module Client
- VERSION = '0.7.0.beta12'
+ VERSION = '0.7.0.beta13'
  end
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: prometheus-client-mmap
  version: !ruby/object:Gem::Version
- version: 0.7.0.beta12
+ version: 0.7.0.beta13
  platform: ruby
  authors:
  - Tobias Schmidt
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2017-08-18 00:00:00.000000000 Z
+ date: 2017-08-22 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: mmap2
@@ -44,6 +44,7 @@ files:
  - lib/prometheus/client/counter.rb
  - lib/prometheus/client/formats/text.rb
  - lib/prometheus/client/gauge.rb
+ - lib/prometheus/client/helper/json_parser.rb
  - lib/prometheus/client/histogram.rb
  - lib/prometheus/client/label_set_validator.rb
  - lib/prometheus/client/metric.rb
@@ -55,6 +56,7 @@ files:
  - lib/prometheus/client/registry.rb
  - lib/prometheus/client/simple_value.rb
  - lib/prometheus/client/summary.rb
+ - lib/prometheus/client/support/unicorn.rb
  - lib/prometheus/client/uses_value_type.rb
  - lib/prometheus/client/version.rb
  homepage: https://gitlab.com/gitlab-org/prometheus-client-mmap