prometheus_exporter 2.1.0 → 2.2.0
- checksums.yaml +4 -4
- data/.github/workflows/ci.yml +10 -6
- data/.rubocop +1 -0
- data/.rubocop.yml +12 -1
- data/.streerc +2 -0
- data/CHANGELOG +12 -1
- data/README.md +22 -9
- data/bench/bench.rb +12 -11
- data/bin/prometheus_exporter +2 -2
- data/examples/custom_collector.rb +1 -3
- data/gemfiles/ar_70.gemfile +2 -0
- data/gemfiles/ar_71.gemfile +7 -0
- data/lib/prometheus_exporter/client.rb +16 -32
- data/lib/prometheus_exporter/instrumentation/active_record.rb +20 -8
- data/lib/prometheus_exporter/instrumentation/delayed_job.rb +23 -13
- data/lib/prometheus_exporter/instrumentation/good_job.rb +2 -4
- data/lib/prometheus_exporter/instrumentation/hutch.rb +1 -1
- data/lib/prometheus_exporter/instrumentation/method_profiler.rb +16 -16
- data/lib/prometheus_exporter/instrumentation/periodic_stats.rb +13 -21
- data/lib/prometheus_exporter/instrumentation/process.rb +14 -6
- data/lib/prometheus_exporter/instrumentation/puma.rb +1 -1
- data/lib/prometheus_exporter/instrumentation/resque.rb +1 -3
- data/lib/prometheus_exporter/instrumentation/shoryuken.rb +6 -7
- data/lib/prometheus_exporter/instrumentation/sidekiq.rb +4 -6
- data/lib/prometheus_exporter/instrumentation/sidekiq_process.rb +12 -19
- data/lib/prometheus_exporter/instrumentation/sidekiq_queue.rb +15 -18
- data/lib/prometheus_exporter/instrumentation/sidekiq_stats.rb +10 -15
- data/lib/prometheus_exporter/instrumentation/unicorn.rb +2 -2
- data/lib/prometheus_exporter/metric/base.rb +8 -7
- data/lib/prometheus_exporter/metric/counter.rb +1 -3
- data/lib/prometheus_exporter/metric/gauge.rb +2 -6
- data/lib/prometheus_exporter/metric/histogram.rb +0 -2
- data/lib/prometheus_exporter/metric/summary.rb +5 -14
- data/lib/prometheus_exporter/middleware.rb +40 -32
- data/lib/prometheus_exporter/server/active_record_collector.rb +11 -6
- data/lib/prometheus_exporter/server/collector.rb +12 -16
- data/lib/prometheus_exporter/server/collector_base.rb +0 -2
- data/lib/prometheus_exporter/server/delayed_job_collector.rb +65 -28
- data/lib/prometheus_exporter/server/good_job_collector.rb +1 -1
- data/lib/prometheus_exporter/server/hutch_collector.rb +19 -11
- data/lib/prometheus_exporter/server/metrics_container.rb +4 -4
- data/lib/prometheus_exporter/server/process_collector.rb +7 -3
- data/lib/prometheus_exporter/server/puma_collector.rb +4 -10
- data/lib/prometheus_exporter/server/resque_collector.rb +1 -1
- data/lib/prometheus_exporter/server/runner.rb +34 -13
- data/lib/prometheus_exporter/server/shoryuken_collector.rb +22 -17
- data/lib/prometheus_exporter/server/sidekiq_collector.rb +22 -14
- data/lib/prometheus_exporter/server/sidekiq_process_collector.rb +9 -5
- data/lib/prometheus_exporter/server/sidekiq_queue_collector.rb +7 -6
- data/lib/prometheus_exporter/server/sidekiq_stats_collector.rb +12 -11
- data/lib/prometheus_exporter/server/unicorn_collector.rb +4 -4
- data/lib/prometheus_exporter/server/web_collector.rb +39 -22
- data/lib/prometheus_exporter/server/web_server.rb +10 -20
- data/lib/prometheus_exporter/version.rb +1 -1
- data/prometheus_exporter.gemspec +20 -22
- metadata +44 -13
data/lib/prometheus_exporter/server/active_record_collector.rb

@@ -10,15 +10,15 @@ module PrometheusExporter::Server
       dead: "Dead connections in pool",
       idle: "Idle connections in pool",
       waiting: "Connection requests waiting",
-      size: "Maximum allowed connection pool size"
+      size: "Maximum allowed connection pool size",
     }
 
     def initialize
       @active_record_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
-      @active_record_metrics.filter = -> (new_metric, old_metric) do
+      @active_record_metrics.filter = ->(new_metric, old_metric) do
         new_metric["pid"] == old_metric["pid"] &&
-        new_metric["hostname"] == old_metric["hostname"] &&
-        new_metric["metric_labels"]["pool_name"] == old_metric["metric_labels"]["pool_name"]
+          new_metric["hostname"] == old_metric["hostname"] &&
+          new_metric["metric_labels"]["pool_name"] == old_metric["metric_labels"]["pool_name"]
       end
     end
 
@@ -32,13 +32,18 @@ module PrometheusExporter::Server
       metrics = {}
 
       @active_record_metrics.map do |m|
-        metric_key = (m["metric_labels"] || {}).merge("pid" => m["pid"], "hostname" => m["hostname"])
+        metric_key =
+          (m["metric_labels"] || {}).merge("pid" => m["pid"], "hostname" => m["hostname"])
         metric_key.merge!(m["custom_labels"]) if m["custom_labels"]
 
         ACTIVE_RECORD_GAUGES.map do |k, help|
           k = k.to_s
           if v = m[k]
-            g = metrics[k] ||= PrometheusExporter::Metric::Gauge.new("active_record_connection_pool_#{k}", help)
+            g =
+              metrics[k] ||= PrometheusExporter::Metric::Gauge.new(
+                "active_record_connection_pool_#{k}",
+                help,
+              )
             g.observe(v, metric_key)
           end
         end
data/lib/prometheus_exporter/server/collector.rb

@@ -1,9 +1,7 @@
 # frozen_string_literal: true
 
 module PrometheusExporter::Server
-
   class Collector < CollectorBase
-
     def initialize(json_serializer: nil)
       @process_metrics = []
       @metrics = {}
@@ -40,19 +38,15 @@ module PrometheusExporter::Server
           collector.collect(obj)
         else
           metric = @metrics[obj["name"]]
-          if !metric
-            metric = register_metric_unsafe(obj)
-          end
+          metric = register_metric_unsafe(obj) if !metric
 
           keys = obj["keys"] || {}
-          if obj["custom_labels"]
-            keys = obj["custom_labels"].merge(keys)
-          end
+          keys = obj["custom_labels"].merge(keys) if obj["custom_labels"]
 
           case obj["prometheus_exporter_action"]
-          when 'increment'
+          when "increment"
             metric.increment(keys, obj["value"])
-          when 'decrement'
+          when "decrement"
             metric.decrement(keys, obj["value"])
           else
             metric.observe(obj["value"], keys)
@@ -63,15 +57,14 @@ module PrometheusExporter::Server
 
     def prometheus_metrics_text
       @mutex.synchronize do
-        (@metrics.values + @collectors.values.map(&:metrics).flatten)
-          .map(&:to_prometheus_text).join("\n")
+        (@metrics.values + @collectors.values.map(&:metrics).flatten).map(
+          &:to_prometheus_text
+        ).join("\n")
       end
     end
 
     def register_metric(metric)
-      @mutex.synchronize do
-        @metrics[metric.name] = metric
-      end
+      @mutex.synchronize { @metrics[metric.name] = metric }
     end
 
     protected
@@ -101,7 +94,10 @@ module PrometheusExporter::Server
     end
 
     def symbolize_keys(hash)
-      hash.inject({}) { |memo, k| memo[k.first.to_sym] = k.last; memo }
+      hash.inject({}) do |memo, k|
+        memo[k.first.to_sym] = k.last
+        memo
+      end
     end
   end
 end
data/lib/prometheus_exporter/server/delayed_job_collector.rb

@@ -20,19 +20,31 @@ module PrometheusExporter::Server
     end
 
     def collect(obj)
-      custom_labels = obj['custom_labels'] || {}
-      gauge_labels = { queue_name: obj['queue_name'] }.merge(custom_labels)
-      counter_labels = gauge_labels.merge(job_name: obj['name'])
+      custom_labels = obj["custom_labels"] || {}
+      gauge_labels = { queue_name: obj["queue_name"] }.merge(custom_labels)
+      counter_labels = gauge_labels.merge(job_name: obj["name"])
 
       ensure_delayed_job_metrics
       @delayed_job_duration_seconds.observe(obj["duration"], counter_labels)
       @delayed_job_latency_seconds_total.observe(obj["latency"], counter_labels)
       @delayed_jobs_total.observe(1, counter_labels)
       @delayed_failed_jobs_total.observe(1, counter_labels) if !obj["success"]
-      @delayed_jobs_max_attempts_reached_total.observe(1, counter_labels) if obj["attempts"] >= obj["max_attempts"]
+      if obj["attempts"] >= obj["max_attempts"]
+        @delayed_jobs_max_attempts_reached_total.observe(1, counter_labels)
+      end
       @delayed_job_duration_seconds_summary.observe(obj["duration"], counter_labels)
-      @delayed_job_duration_seconds_summary.observe(obj["duration"], counter_labels.merge(status: "success")) if obj["success"]
-      @delayed_job_duration_seconds_summary.observe(obj["duration"], counter_labels.merge(status: "failed")) if !obj["success"]
+      if obj["success"]
+        @delayed_job_duration_seconds_summary.observe(
+          obj["duration"],
+          counter_labels.merge(status: "success"),
+        )
+      end
+      if !obj["success"]
+        @delayed_job_duration_seconds_summary.observe(
+          obj["duration"],
+          counter_labels.merge(status: "failed"),
+        )
+      end
       @delayed_job_attempts_summary.observe(obj["attempts"], counter_labels) if obj["success"]
       @delayed_jobs_enqueued.observe(obj["enqueued"], gauge_labels)
       @delayed_jobs_pending.observe(obj["pending"], gauge_labels)
@@ -40,9 +52,17 @@ module PrometheusExporter::Server
 
     def metrics
       if @delayed_jobs_total
-        [@delayed_job_duration_seconds, @delayed_job_latency_seconds_total, @delayed_jobs_total,
-         @delayed_failed_jobs_total, @delayed_jobs_max_attempts_reached_total, @delayed_job_duration_seconds_summary,
-         @delayed_job_attempts_summary, @delayed_jobs_enqueued, @delayed_jobs_pending]
+        [
+          @delayed_job_duration_seconds,
+          @delayed_job_latency_seconds_total,
+          @delayed_jobs_total,
+          @delayed_failed_jobs_total,
+          @delayed_jobs_max_attempts_reached_total,
+          @delayed_job_duration_seconds_summary,
+          @delayed_job_attempts_summary,
+          @delayed_jobs_enqueued,
+          @delayed_jobs_pending,
+        ]
       else
         []
       end
@@ -52,42 +72,59 @@ module PrometheusExporter::Server
 
     def ensure_delayed_job_metrics
       if !@delayed_jobs_total
-
         @delayed_job_duration_seconds =
-        PrometheusExporter::Metric::Counter.new(
-          "delayed_job_duration_seconds", "Total time spent in delayed jobs.")
+          PrometheusExporter::Metric::Counter.new(
+            "delayed_job_duration_seconds",
+            "Total time spent in delayed jobs.",
+          )
 
         @delayed_job_latency_seconds_total =
-        PrometheusExporter::Metric::Counter.new(
-          "delayed_job_latency_seconds_total", "Total delayed jobs latency.")
+          PrometheusExporter::Metric::Counter.new(
+            "delayed_job_latency_seconds_total",
+            "Total delayed jobs latency.",
+          )
 
         @delayed_jobs_total =
-        PrometheusExporter::Metric::Counter.new(
-          "delayed_jobs_total", "Total number of delayed jobs executed.")
+          PrometheusExporter::Metric::Counter.new(
+            "delayed_jobs_total",
+            "Total number of delayed jobs executed.",
+          )
 
         @delayed_jobs_enqueued =
-        PrometheusExporter::Metric::Gauge.new(
-          "delayed_jobs_enqueued", "Number of enqueued delayed jobs.")
+          PrometheusExporter::Metric::Gauge.new(
+            "delayed_jobs_enqueued",
+            "Number of enqueued delayed jobs.",
+          )
 
         @delayed_jobs_pending =
-        PrometheusExporter::Metric::Gauge.new(
-          "delayed_jobs_pending", "Number of pending delayed jobs.")
+          PrometheusExporter::Metric::Gauge.new(
+            "delayed_jobs_pending",
+            "Number of pending delayed jobs.",
+          )
 
         @delayed_failed_jobs_total =
-        PrometheusExporter::Metric::Counter.new(
-          "delayed_failed_jobs_total", "Total number failed delayed jobs executed.")
+          PrometheusExporter::Metric::Counter.new(
+            "delayed_failed_jobs_total",
+            "Total number failed delayed jobs executed.",
+          )
 
         @delayed_jobs_max_attempts_reached_total =
-        PrometheusExporter::Metric::Counter.new(
-          "delayed_jobs_max_attempts_reached_total", "Total number of delayed jobs that reached max attempts.")
+          PrometheusExporter::Metric::Counter.new(
+            "delayed_jobs_max_attempts_reached_total",
+            "Total number of delayed jobs that reached max attempts.",
+          )
 
         @delayed_job_duration_seconds_summary =
-        PrometheusExporter::Metric::Base.default_aggregation.new(
-          "delayed_job_duration_seconds_summary", "Summary of the time it takes jobs to execute.")
+          PrometheusExporter::Metric::Base.default_aggregation.new(
+            "delayed_job_duration_seconds_summary",
+            "Summary of the time it takes jobs to execute.",
+          )
 
         @delayed_job_attempts_summary =
-        PrometheusExporter::Metric::Base.default_aggregation.new(
-          "delayed_job_attempts_summary", "Summary of the amount of attempts it takes delayed jobs to succeed.")
+          PrometheusExporter::Metric::Base.default_aggregation.new(
+            "delayed_job_attempts_summary",
+            "Summary of the amount of attempts it takes delayed jobs to succeed.",
+          )
       end
     end
   end
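
The collect method above reads a fixed set of fields from each message. An illustrative payload sketch; the field names are taken verbatim from the hunk, while the "type" routing key is an assumption about how type collectors are addressed, not shown in this diff:

payload = {
  "type" => "delayed_job",  # assumed routing key
  "name" => "NotifyUser",
  "queue_name" => "default",
  "success" => false,
  "duration" => 1.8,
  "latency" => 0.2,
  "attempts" => 25,
  "max_attempts" => 25,
  "enqueued" => 12,
  "pending" => 4,
}
# This would bump delayed_failed_jobs_total, record the duration under
# status="failed", and, because attempts >= max_attempts, increment
# delayed_jobs_max_attempts_reached_total.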
data/lib/prometheus_exporter/server/good_job_collector.rb

@@ -10,7 +10,7 @@ module PrometheusExporter::Server
       running: "Total number of running GoodJob jobs.",
       finished: "Total number of finished GoodJob jobs.",
       succeeded: "Total number of succeeded GoodJob jobs.",
-      discarded: "Total number of discarded GoodJob jobs."
+      discarded: "Total number of discarded GoodJob jobs.",
     }
 
     def initialize
data/lib/prometheus_exporter/server/hutch_collector.rb

@@ -14,8 +14,8 @@ module PrometheusExporter::Server
     end
 
     def collect(obj)
-      default_labels = { job_name: obj['name'] }
-      custom_labels = obj['custom_labels']
+      default_labels = { job_name: obj["name"] }
+      custom_labels = obj["custom_labels"]
       labels = custom_labels.nil? ? default_labels : default_labels.merge(custom_labels)
 
       ensure_hutch_metrics
@@ -36,15 +36,23 @@ module PrometheusExporter::Server
 
     def ensure_hutch_metrics
       if !@hutch_jobs_total
-        @hutch_job_duration_seconds =
-        PrometheusExporter::Metric::Counter.new(
-          "hutch_job_duration_seconds", "Total time spent in hutch jobs.")
-        @hutch_jobs_total =
-        PrometheusExporter::Metric::Counter.new(
-          "hutch_jobs_total", "Total number of hutch jobs executed.")
-        @hutch_failed_jobs_total =
-        PrometheusExporter::Metric::Counter.new(
-          "hutch_failed_jobs_total", "Total number failed hutch jobs executed.")
+        @hutch_job_duration_seconds =
+          PrometheusExporter::Metric::Counter.new(
+            "hutch_job_duration_seconds",
+            "Total time spent in hutch jobs.",
+          )
+
+        @hutch_jobs_total =
+          PrometheusExporter::Metric::Counter.new(
+            "hutch_jobs_total",
+            "Total number of hutch jobs executed.",
+          )
+
+        @hutch_failed_jobs_total =
+          PrometheusExporter::Metric::Counter.new(
+            "hutch_failed_jobs_total",
+            "Total number failed hutch jobs executed.",
+          )
       end
     end
   end
data/lib/prometheus_exporter/server/metrics_container.rb

@@ -9,10 +9,10 @@ module PrometheusExporter::Server
     attr_accessor :filter
 
     def initialize(ttl: METRIC_MAX_AGE, expire_attr: METRIC_EXPIRE_ATTR, filter: nil)
-      @data        = []
-      @ttl         = ttl
-      @expire_attr = expire_attr
-      @filter      = filter
+      @data = []
+      @ttl = ttl
+      @expire_attr = expire_attr
+      @filter = filter
     end
 
     def <<(obj)
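
A short sketch of the container's contract as the collectors above use it: entries expire after ttl seconds, and the optional filter lets a newer entry displace older entries from the same source (the pid/hostname lambdas in the collector hunks). The values below are made up for illustration:

container = PrometheusExporter::Server::MetricsContainer.new(ttl: 60)
container.filter = ->(new_metric, old_metric) do
  new_metric["pid"] == old_metric["pid"]
end
container << { "pid" => 1234, "rss" => 150_000 }
container << { "pid" => 1234, "rss" => 151_000 } # displaces the first entry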
data/lib/prometheus_exporter/server/process_collector.rb

@@ -1,7 +1,6 @@
 # frozen_string_literal: true
 
 module PrometheusExporter::Server
-
   class ProcessCollector < TypeCollector
     MAX_METRIC_AGE = 60
 
@@ -13,6 +12,10 @@ module PrometheusExporter::Server
       v8_physical_size: "Physical size consumed by V8 heaps.",
       v8_heap_count: "Number of V8 contexts running.",
       rss: "Total RSS used by process.",
+      malloc_increase_bytes_limit:
+        "Limit before Ruby triggers a GC against current objects (bytes).",
+      oldmalloc_increase_bytes_limit:
+        "Limit before Ruby triggers a major GC against old objects (bytes).",
     }
 
     PROCESS_COUNTERS = {
@@ -23,7 +26,7 @@ module PrometheusExporter::Server
 
     def initialize
       @process_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
-      @process_metrics.filter = -> (new_metric, old_metric) do
+      @process_metrics.filter = ->(new_metric, old_metric) do
         new_metric["pid"] == old_metric["pid"] && new_metric["hostname"] == old_metric["hostname"]
       end
     end
@@ -38,7 +41,8 @@ module PrometheusExporter::Server
       metrics = {}
 
       @process_metrics.map do |m|
-        metric_key = (m["metric_labels"] || {}).merge("pid" => m["pid"], "hostname" => m["hostname"])
+        metric_key =
+          (m["metric_labels"] || {}).merge("pid" => m["pid"], "hostname" => m["hostname"])
         metric_key.merge!(m["custom_labels"]) if m["custom_labels"]
 
         PROCESS_GAUGES.map do |k, help|
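
The two gauges added above correspond to real GC.stat keys; the values are sampled on the instrumented process (the reporting side lives in data/lib/prometheus_exporter/instrumentation/process.rb, whose hunks are not shown here). For reference:

stats = GC.stat
stats[:malloc_increase_bytes_limit]    # bytes of new allocations before Ruby triggers a GC
stats[:oldmalloc_increase_bytes_limit] # bytes of old-object growth before a major GC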
data/lib/prometheus_exporter/server/puma_collector.rb

@@ -15,7 +15,7 @@ module PrometheusExporter::Server
 
     def initialize
       @puma_metrics = MetricsContainer.new(ttl: MAX_PUMA_METRIC_AGE)
-      @puma_metrics.filter = -> (new_metric, old_metric) do
+      @puma_metrics.filter = ->(new_metric, old_metric) do
         new_metric["pid"] == old_metric["pid"] && new_metric["hostname"] == old_metric["hostname"]
       end
     end
@@ -31,15 +31,9 @@ module PrometheusExporter::Server
 
       @puma_metrics.map do |m|
         labels = {}
-        if m["phase"]
-          labels.merge!(phase: m["phase"])
-        end
-        if m["custom_labels"]
-          labels.merge!(m["custom_labels"])
-        end
-        if m["metric_labels"]
-          labels.merge!(m["metric_labels"])
-        end
+        labels.merge!(phase: m["phase"]) if m["phase"]
+        labels.merge!(m["custom_labels"]) if m["custom_labels"]
+        labels.merge!(m["metric_labels"]) if m["metric_labels"]
 
         PUMA_GAUGES.map do |k, help|
           k = k.to_s
data/lib/prometheus_exporter/server/resque_collector.rb

@@ -9,7 +9,7 @@ module PrometheusExporter::Server
       pending_jobs: "Total number of pending Resque jobs.",
       queues: "Total number of Resque queues.",
       workers: "Total number of Resque workers running.",
-      working: "Total number of Resque workers working."
+      working: "Total number of Resque workers working.",
     }
 
     def initialize
data/lib/prometheus_exporter/server/runner.rb

@@ -1,10 +1,13 @@
 # frozen_string_literal: true
 
-require_relative '../client'
+require_relative "../client"
 
 module PrometheusExporter::Server
-  class RunnerException < StandardError; end
-  class WrongInheritance < RunnerException; end
+  class RunnerException < StandardError
+  end
+
+  class WrongInheritance < RunnerException
+  end
 
   class Runner
     def initialize(options = {})
@@ -18,9 +21,7 @@ module PrometheusExporter::Server
       @realm = nil
       @histogram = nil
 
-      options.each do |k, v|
-        send("#{k}=", v) if self.class.method_defined?("#{k}=")
-      end
+      options.each { |k, v| send("#{k}=", v) if self.class.method_defined?("#{k}=") }
     end
 
     def start
@@ -34,27 +35,47 @@ module PrometheusExporter::Server
       register_type_collectors
 
       unless collector.is_a?(PrometheusExporter::Server::CollectorBase)
-        raise WrongInheritance, 'Collector class must be inherited from PrometheusExporter::Server::CollectorBase'
+        raise WrongInheritance,
+              "Collector class must be inherited from PrometheusExporter::Server::CollectorBase"
       end
 
       if unicorn_listen_address && unicorn_pid_file
-
-        require_relative '../instrumentation'
+        require_relative "../instrumentation"
 
         local_client = PrometheusExporter::LocalClient.new(collector: collector)
         PrometheusExporter::Instrumentation::Unicorn.start(
           pid_file: unicorn_pid_file,
           listener_address: unicorn_listen_address,
-          client: local_client
+          client: local_client,
         )
       end
 
-      server = server_class.new(port: port, bind: bind, collector: collector, timeout: timeout, verbose: verbose, auth: auth, realm: realm)
+      server =
+        server_class.new(
+          port: port,
+          bind: bind,
+          collector: collector,
+          timeout: timeout,
+          verbose: verbose,
+          auth: auth,
+          realm: realm,
+        )
       server.start
     end
 
     attr_accessor :unicorn_listen_address, :unicorn_pid_file
-    attr_writer :prefix, :port, :bind, :collector_class, :type_collectors, :timeout, :verbose, :server_class, :label, :auth, :realm, :histogram
+    attr_writer :prefix,
+                :port,
+                :bind,
+                :collector_class,
+                :type_collectors,
+                :timeout,
+                :verbose,
+                :server_class,
+                :label,
+                :auth,
+                :realm,
+                :histogram
 
     def auth
      @auth || nil
@@ -89,7 +110,7 @@ module PrometheusExporter::Server
     end
 
     def verbose
-      return @verbose if defined? @verbose
+      return @verbose if defined?(@verbose)
      false
     end
 
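
A minimal sketch of driving the Runner programmatically, mirroring what bin/prometheus_exporter does: any option with a matching attr_writer above is applied, and unknown keys are silently skipped by the options.each line (9394 is the gem's default port):

require "prometheus_exporter/server"

runner = PrometheusExporter::Server::Runner.new(port: 9394, verbose: true)
runner.start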
data/lib/prometheus_exporter/server/shoryuken_collector.rb

@@ -2,7 +2,6 @@
 
 module PrometheusExporter::Server
   class ShoryukenCollector < TypeCollector
-
     def initialize
       @shoryuken_jobs_total = nil
       @shoryuken_job_duration_seconds = nil
@@ -16,8 +15,8 @@ module PrometheusExporter::Server
     end
 
     def collect(obj)
-      default_labels = { job_name: obj['name'], queue_name: obj['queue'] }
-      custom_labels = obj['custom_labels']
+      default_labels = { job_name: obj["name"], queue_name: obj["queue"] }
+      custom_labels = obj["custom_labels"]
       labels = custom_labels.nil? ? default_labels : default_labels.merge(custom_labels)
 
       ensure_shoryuken_metrics
@@ -30,10 +29,10 @@ module PrometheusExporter::Server
     def metrics
       if @shoryuken_jobs_total
         [
-        @shoryuken_job_duration_seconds,
-        @shoryuken_jobs_total,
-        @shoryuken_restarted_jobs_total,
-        @shoryuken_failed_jobs_total
+          @shoryuken_job_duration_seconds,
+          @shoryuken_jobs_total,
+          @shoryuken_restarted_jobs_total,
+          @shoryuken_failed_jobs_total,
         ]
       else
         []
@@ -44,23 +43,29 @@ module PrometheusExporter::Server
 
     def ensure_shoryuken_metrics
       if !@shoryuken_jobs_total
-
         @shoryuken_job_duration_seconds =
-        PrometheusExporter::Metric::Counter.new(
-          "shoryuken_job_duration_seconds", "Total time spent in shoryuken jobs.")
+          PrometheusExporter::Metric::Counter.new(
+            "shoryuken_job_duration_seconds",
+            "Total time spent in shoryuken jobs.",
+          )
 
         @shoryuken_jobs_total =
-        PrometheusExporter::Metric::Counter.new(
-          "shoryuken_jobs_total", "Total number of shoryuken jobs executed.")
+          PrometheusExporter::Metric::Counter.new(
+            "shoryuken_jobs_total",
+            "Total number of shoryuken jobs executed.",
+          )
 
         @shoryuken_restarted_jobs_total =
-        PrometheusExporter::Metric::Counter.new(
-          "shoryuken_restarted_jobs_total", "Total number of shoryuken jobs that we restarted because of a shoryuken shutdown.")
+          PrometheusExporter::Metric::Counter.new(
+            "shoryuken_restarted_jobs_total",
+            "Total number of shoryuken jobs that we restarted because of a shoryuken shutdown.",
+          )
 
         @shoryuken_failed_jobs_total =
-        PrometheusExporter::Metric::Counter.new(
-          "shoryuken_failed_jobs_total", "Total number of failed shoryuken jobs.")
-
+          PrometheusExporter::Metric::Counter.new(
+            "shoryuken_failed_jobs_total",
+            "Total number of failed shoryuken jobs.",
+          )
       end
     end
   end
data/lib/prometheus_exporter/server/sidekiq_collector.rb

@@ -2,7 +2,6 @@
 
 module PrometheusExporter::Server
   class SidekiqCollector < TypeCollector
-
     def initialize
       @sidekiq_jobs_total = nil
       @sidekiq_job_duration_seconds = nil
@@ -17,8 +16,8 @@ module PrometheusExporter::Server
     end
 
     def collect(obj)
-      default_labels = { job_name: obj['name'], queue: obj['queue'] }
-      custom_labels = obj['custom_labels']
+      default_labels = { job_name: obj["name"], queue: obj["queue"] }
+      custom_labels = obj["custom_labels"]
       labels = custom_labels.nil? ? default_labels : default_labels.merge(custom_labels)
 
       ensure_sidekiq_metrics
@@ -50,26 +49,35 @@ module PrometheusExporter::Server
 
     def ensure_sidekiq_metrics
       if !@sidekiq_jobs_total
-
         @sidekiq_job_duration_seconds =
-        PrometheusExporter::Metric::Base.default_aggregation.new(
-          "sidekiq_job_duration_seconds", "Total time spent in sidekiq jobs.")
+          PrometheusExporter::Metric::Base.default_aggregation.new(
+            "sidekiq_job_duration_seconds",
+            "Total time spent in sidekiq jobs.",
+          )
 
         @sidekiq_jobs_total =
-        PrometheusExporter::Metric::Counter.new(
-          "sidekiq_jobs_total", "Total number of sidekiq jobs executed.")
+          PrometheusExporter::Metric::Counter.new(
+            "sidekiq_jobs_total",
+            "Total number of sidekiq jobs executed.",
+          )
 
         @sidekiq_restarted_jobs_total =
-        PrometheusExporter::Metric::Counter.new(
-          "sidekiq_restarted_jobs_total", "Total number of sidekiq jobs that we restarted because of a sidekiq shutdown.")
+          PrometheusExporter::Metric::Counter.new(
+            "sidekiq_restarted_jobs_total",
+            "Total number of sidekiq jobs that we restarted because of a sidekiq shutdown.",
+          )
 
         @sidekiq_failed_jobs_total =
-        PrometheusExporter::Metric::Counter.new(
-          "sidekiq_failed_jobs_total", "Total number of failed sidekiq jobs.")
+          PrometheusExporter::Metric::Counter.new(
+            "sidekiq_failed_jobs_total",
+            "Total number of failed sidekiq jobs.",
+          )
 
         @sidekiq_dead_jobs_total =
-        PrometheusExporter::Metric::Counter.new(
-          "sidekiq_dead_jobs_total", "Total number of dead sidekiq jobs.")
+          PrometheusExporter::Metric::Counter.new(
+            "sidekiq_dead_jobs_total",
+            "Total number of dead sidekiq jobs.",
+          )
       end
     end
   end
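
Both the sidekiq and delayed_job duration metrics above are built via Base.default_aggregation rather than a hard-coded metric class, which lets the server swap Summary for Histogram globally (the histogram option threaded through the Runner changes in this release). A sketch under that assumption:

# With the histogram option enabled, the same constructor call yields a
# Histogram instead of the default Summary.
PrometheusExporter::Metric::Base.default_aggregation = PrometheusExporter::Metric::Histogram
PrometheusExporter::Metric::Base.default_aggregation.new(
  "sidekiq_job_duration_seconds",
  "Total time spent in sidekiq jobs.",
)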