frankenstein 0.2.0 → 0.2.0.4.g676c8dd
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/frankenstein/collected_metric.rb +155 -0
- data/lib/frankenstein/server/webrick_logger.rb +12 -2
- data/lib/frankenstein/server.rb +4 -0
- metadata +5 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 5a1c3801006a61ff302b45a2d3e78d50372233ef
+  data.tar.gz: 7dc0bea23f93d05bbb240dfde8a6362c8760e966
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: aabab410d46c11061e28afd79867bd3ada573fa637ee26b3d18c3df0c74f149afbe84a9c6fcc95badd65a16c7c16cc2696c93aaf394cb8d894424b7f3c9591a7
+  data.tar.gz: 5ddc183c855c5ac4e28392e775657d34c6153ad3100d6b1fdc6126efa7e4e8ce5ba439111391328ab3a9a8e98dfce1babc96a93135da8c086c313315576b0381
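If you want to check these values against a downloaded gem yourself, here is a minimal sketch using only the standard library; the .gem file name is assumed, and only the SHA512 digests are recomputed:

    require 'digest'
    require 'rubygems/package'

    # A .gem file is a plain tar archive containing metadata.gz, data.tar.gz and
    # checksums.yaml.gz; the checksums above are taken over the first two entries.
    Gem::Package::TarReader.new(File.open("frankenstein-0.2.0.4.g676c8dd.gem", "rb")).each do |entry|
      next unless %w{metadata.gz data.tar.gz}.include?(entry.full_name)
      puts "#{entry.full_name}: #{Digest::SHA512.hexdigest(entry.read)}"
    end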
data/lib/frankenstein/collected_metric.rb
ADDED
@@ -0,0 +1,155 @@
+require 'prometheus/client'
+require 'prometheus/client/metric'
+require 'logger'
+
+module Frankenstein
+  # Populate metric data at scrape time
+  #
+  # The usual implementation of a Prometheus registry is to create and
+  # register a suite of metrics at program initialization, and then instrument
+  # the running code by setting/incrementing/decrementing the metrics and
+  # their label sets as the program runs.
+  #
+  # Sometimes, however, your program itself doesn't actually interact with the
+  # values that you want to return in your metrics, such as the counts of some
+  # external resource. You can hack around this by running something
+  # periodically in a thread to poll the external resource and update the
+  # value, but that's icky.
+  #
+  # Instead, this class provides you with a way to say, "whenever we're
+  # scraped, run this block of code to generate the label sets and current
+  # values, and return that as part of the scrape data". This allows you to
+  # do away with ugly polling threads, and instead just write a simple "gather
+  # some data and return some numbers" block.
+  #
+  # The block to run is passed to the Frankenstein::CollectedMetric
+  # constructor, and *must* return a hash, containing the labelsets and
+  # associated numeric values you want to return for the scrape. If your
+  # block doesn't send back a hash, or raises an exception during execution,
+  # no values will be returned for the metric, an error will be logged (if a
+  # logger was specified), and the value of the
+  # `<metric>_collection_errors_total` counter, labelled by the exception
+  # `class`, will be incremented.
+  #
+  # @example Returning a database query
+  #
+  #    Frankenstein::CollectedMetric.new(:my_db_query, "The results of a DB query") do
+  #      ActiveRecord::Base.connection.execute("SELECT name,class,value FROM some_table").each_with_object do |row, h|
+  #        h[name: row['name'], class: row['class']] = row['value']
+  #      end
+  #    end
+  #
+  #
+  # # Performance & Concurrency
+  #
+  # Bear in mind that the code that you specify for the collection action will
+  # be run *on every scrape*; if you've got two Prometheus servers, with a
+  # scrape interval of 30 seconds, you'll be running this code once every 15
+  # seconds, forever. Also, Prometheus scrapes have a default timeout of five
+  # seconds. So, whatever your collection code does, make it snappy and
+  # low-overhead.
+  #
+  # On a related note, remember that scrapes can arrive in parallel, so your
+  # collection code could potentially be running in parallel, too (depending
+  # on your metrics server). Thus, it must be thread-safe -- preferably, it
+  # should avoid mutating shared state at all.
+  #
+  class CollectedMetric < Prometheus::Client::Metric
+    # The type of the metric being collected.
+    attr_reader :type
+
+    # @param name [Symbol] the name of the metric to collect for. This must
+    #    follow all the normal rules for a Prometheus metric name, and should
+    #    meet [the guidelines for metric naming](https://prometheus.io/docs/practices/naming/),
+    #    unless you like being shunned at parties.
+    #
+    # @param docstring [#to_s] the descriptive help text for the metric.
+    #
+    # @param type [Symbol] what type of metric you're returning. It's uncommon
+    #    to want anything other than `:gauge` here (the default), because
+    #    when you're collecting external data it's uncommon to be able to
+    #    trust that your external data source will behave like a proper
+    #    counter (or histogram or summary), but if you want the flexibility,
+    #    it's there for you. If you do decide to try your hand at collecting
+    #    a histogram or summary, bear in mind that the value that you need to
+    #    return is not a number, or even a hash -- it's a Prometheus-internal
+    #    class instance, and dealing with the intricacies of that is entirely
+    #    up to you.
+    #
+    # @param logger [Logger] if you want to know what's going on inside your
+    #    metric, you can pass a logger and see what's going on. Otherwise,
+    #    you'll be blind if anything goes badly wrong. Up to you.
+    #
+    # @param registry [Prometheus::Client::Registry] the registry in which
+    #    this metric will reside. The `<metric>_collection_errors_total`
+    #    metric will also be registered here, so you'll know if a collection
+    #    fails.
+    #
+    # @param collector [Proc] the code to run on every scrape request.
+    #
+    def initialize(name, docstring, type: :gauge, logger: Logger.new('/dev/null'), registry: Prometheus::Client.registry, &collector)
+      @validator = Prometheus::Client::LabelSetValidator.new
+
+      validate_name(name)
+      validate_docstring(docstring)
+
+      @name = name
+      @docstring = docstring
+      @base_labels = {}
+
+      validate_type(type)
+
+      @type = type
+      @logger = logger
+      @registry = registry
+      @collector = collector
+
+      @errors_metric = @registry.counter(:"#{@name}_collection_errors_total", "Errors encountered while collecting for #{@name}")
+      @registry.register(self)
+    end
+
+    # Retrieve the value for the given labelset.
+    #
+    def get(labels = {})
+      @validator.validate(labels)
+
+      values[labels]
+    end
+
+    # Retrieve a complete set of labels and values for the metric.
+    #
+    def values
+      begin
+        @collector.call(self).tap do |results|
+          unless results.is_a?(Hash)
+            @logger.error(progname) { "Collector proc did not return a hash, got #{results.inspect}" }
+            @errors_metric.increment(class: "NotAHashError")
+            return {}
+          end
+          results.keys.each { |labelset| @validator.validate(labelset) }
+        end
+      rescue StandardError => ex
+        @logger.error(progname) { (["Exception in collection: #{ex.message} (#{ex.class})"] + ex.backtrace).join("\n ") }
+        @errors_metric.increment(class: ex.class.to_s)
+
+        {}
+      end
+    end
+
+    private
+
+    # Make sure that the type we were passed is one Prometheus is known to accept.
+    #
+    def validate_type(type)
+      unless %i{gauge counter histogram summary}.include?(type)
+        raise ArgumentError, "type must be one of :gauge, :counter, :histogram, or :summary (got #{type.inspect})"
+      end
+    end
+
+    # Generate the logger progname.
+    #
+    def progname
+      @progname ||= "Frankenstein::CollectedMetric(#{@name})".freeze
+    end
+  end
+end
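To give a feel for how the new class is meant to be driven, here is a minimal usage sketch. The Redis backend, the queue names, and the metric name are hypothetical illustrations rather than anything shipped with the gem; the only Frankenstein API used is the constructor shown above.

    require 'frankenstein/collected_metric'
    require 'redis'  # hypothetical external data source

    # At scrape time the block runs and must return a hash of labelset => value.
    # Exceptions raised here are counted in queue_length_collection_errors_total.
    Frankenstein::CollectedMetric.new(:queue_length, "Jobs waiting in each queue") do
      redis = Redis.new
      %w{default mailers}.each_with_object({}) do |queue, h|
        h[{ queue: queue }] = redis.llen("queue:#{queue}")
      end
    end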
data/lib/frankenstein/server/webrick_logger.rb
CHANGED
@@ -39,8 +39,13 @@ module Frankenstein
       end
 
       %i{debug error fatal info warn}.each do |sev|
-        define_method(sev) do |msg, &blk|
-          if blk
+        define_method(sev) do |msg = nil, &blk|
+          if msg && blk
+            # This never happens in webrick now, but they might get the memo
+            # one day
+            @logger.__send__(sev, msg, &blk)
+          elsif blk
+            # I can't find any of these, either, but I live in hope
             @logger.__send__(sev, @progname, &blk)
           else
             @logger.__send__(sev, @progname) { msg }
@@ -48,6 +53,11 @@ module Frankenstein
         end
       end
 
+      # Simulate the "append literal message" feature
+      #
+      # Nothing goes into *my* logs without having appropriate metadata attached,
+      # so this just funnels these messages into the proper priority-based system.
+      #
       def <<(msg)
         @logger.add(@priority, msg, @progname)
       end
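For context, here is a small sketch of the calling conventions this wrapper now has to absorb from WEBrick. The class name and the constructor keywords (logger:, progname:) are inferred from the file path and the rest of the file, which this diff does not show, so treat them as assumptions:

    require 'logger'
    require 'frankenstein/server/webrick_logger'

    wrapped = Frankenstein::Server::WEBrickLogger.new(logger: Logger.new($stderr), progname: "WEBrick")

    wrapped.info("server started")         # message only -- what WEBrick does today
    wrapped.debug { "expensive message" }  # block only -- lazily evaluated, just in case
    wrapped << "GET /metrics HTTP/1.1"     # access-log append, routed through Logger#add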
data/lib/frankenstein/server.rb
CHANGED
@@ -82,7 +82,9 @@ module Frankenstein
             @server = WEBrick::HTTPServer.new(Logger: wrapped_logger, BindAddress: nil, Port: @port, AccessLog: [[wrapped_logger, WEBrick::AccessLog::COMMON_LOG_FORMAT]])
             @server.mount "/", Rack::Handler::WEBrick, app
           rescue => ex
+            #:nocov:
             @logger.fatal("Frankenstein::Server#run") { (["Exception while trying to create WEBrick::HTTPServer: #{ex.message} (#{ex.class})"] + ex.backtrace).join("\n ") }
+            #:nocov:
           ensure
             @op_cv.signal
           end
@@ -91,7 +93,9 @@ module Frankenstein
           begin
             @server.start if @server
           rescue => ex
+            #:nocov:
             @logger.fatal("Frankenstein::Server#run") { (["Exception while running WEBrick::HTTPServer: #{ex.message} (#{ex.class})"] + ex.backtrace).join("\n ") }
+            #:nocov:
           end
         end
       end
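The #:nocov: markers are coverage-exclusion tokens; the diff itself doesn't say which tool consumes them, but they match SimpleCov's default token. A rough sketch of the effect, under that assumption (the method and handler below are hypothetical):

    require 'simplecov'
    SimpleCov.start  # "nocov" is the default exclusion token

    def start_server
      # normal startup path, exercised by the test suite
    rescue => ex
      #:nocov:
      # Hard to trigger in tests, so the lines between the markers are simply
      # left out of the coverage report instead of being artificially exercised.
      warn "server failed to start: #{ex.message}"
      #:nocov:
    end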
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: frankenstein
 version: !ruby/object:Gem::Version
-  version: 0.2.0
+  version: 0.2.0.4.g676c8dd
 platform: ruby
 authors:
 - Matt Palmer
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-
+date: 2018-03-08 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: prometheus-client
@@ -226,6 +226,7 @@ files:
 - README.md
 - frankenstein.gemspec
 - lib/frankenstein.rb
+- lib/frankenstein/collected_metric.rb
 - lib/frankenstein/error.rb
 - lib/frankenstein/request.rb
 - lib/frankenstein/server.rb
@@ -244,9 +245,9 @@ required_ruby_version: !ruby/object:Gem::Requirement
       version: 2.3.0
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - - "
+  - - ">"
     - !ruby/object:Gem::Version
-      version:
+      version: 1.3.1
 requirements: []
 rubyforge_project:
 rubygems_version: 2.6.13