vinted-resque-metrics 0.0.7

checksums.yaml ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA1:
+   metadata.gz: 062fadf259abb6e87ffd46519d5195aa2acb7816
+   data.tar.gz: 1064e5f84426606cd0d89a252946d01665d69c3b
+ SHA512:
+   metadata.gz: 4724dacbff95e8496ff18cf87e69d9978f660b2297af79248f4635883f97167c9aaf88119308a5f36dbc66ffa6d2e7513023d2512c6bb5e9b40912eb6ff65990
+   data.tar.gz: d87b7c305d024dbc259f503c8415d99412054ae9c44c6eead45a37d399634c5d90a6837e577019a7be4681f4babd800cfca7b742fc52f125f6bd067965fefb3a
data/.document ADDED
@@ -0,0 +1,5 @@
+ lib/**/*.rb
+ bin/*
+ -
+ features/**/*.feature
+ LICENSE.txt
data/Gemfile ADDED
@@ -0,0 +1,17 @@
+ source "http://rubygems.org"
+
+ gem 'resque', '~>1.19'
+ # Add dependencies required to use your gem here.
+ # Example:
+ #   gem "activesupport", ">= 2.3.5"
+
+ # Add dependencies to develop your gem here.
+ # Include everything needed to run rake, tests, features, etc.
+ group :development do
+   platform :mri_19 do
+     gem "ruby-debug19"
+   end
+   gem 'minitest', '>=2'
+   gem "bundler"
+   gem "jeweler"
+ end
data/Gemfile.lock ADDED
@@ -0,0 +1,81 @@
1
+ GEM
2
+ remote: http://rubygems.org/
3
+ specs:
4
+ addressable (2.3.5)
5
+ archive-tar-minitar (0.5.2)
6
+ builder (3.2.2)
7
+ columnize (0.3.4)
8
+ faraday (0.7.6)
9
+ addressable (~> 2.2)
10
+ multipart-post (~> 1.1)
11
+ rack (~> 1.1)
12
+ git (1.2.6)
13
+ github_api (0.4.10)
14
+ faraday (~> 0.7.6)
15
+ hashie (~> 1.2.0)
16
+ multi_json (~> 1.0)
17
+ oauth2 (~> 0.5.2)
18
+ hashie (1.2.0)
19
+ highline (1.6.20)
20
+ jeweler (2.0.1)
21
+ builder
22
+ bundler (>= 1.0)
23
+ git (>= 1.2.5)
24
+ github_api
25
+ highline (>= 1.6.15)
26
+ nokogiri (>= 1.5.10)
27
+ rake
28
+ rdoc
29
+ json (1.8.1)
30
+ linecache19 (0.5.12)
31
+ ruby_core_source (>= 0.1.4)
32
+ mini_portile (0.5.2)
33
+ minitest (2.8.0)
34
+ multi_json (1.0.3)
35
+ multipart-post (1.2.0)
36
+ nokogiri (1.6.1)
37
+ mini_portile (~> 0.5.0)
38
+ oauth2 (0.5.2)
39
+ faraday (~> 0.7)
40
+ multi_json (~> 1.0)
41
+ rack (1.3.5)
42
+ rack-protection (1.1.4)
43
+ rack
44
+ rake (10.1.1)
45
+ rdoc (4.1.1)
46
+ json (~> 1.4)
47
+ redis (2.2.2)
48
+ redis-namespace (1.0.3)
49
+ redis (< 3.0.0)
50
+ resque (1.19.0)
51
+ multi_json (~> 1.0)
52
+ redis-namespace (~> 1.0.2)
53
+ sinatra (>= 0.9.2)
54
+ vegas (~> 0.1.2)
55
+ ruby-debug-base19 (0.11.25)
56
+ columnize (>= 0.3.1)
57
+ linecache19 (>= 0.5.11)
58
+ ruby_core_source (>= 0.1.4)
59
+ ruby-debug19 (0.11.6)
60
+ columnize (>= 0.3.1)
61
+ linecache19 (>= 0.5.11)
62
+ ruby-debug-base19 (>= 0.11.19)
63
+ ruby_core_source (0.1.5)
64
+ archive-tar-minitar (>= 0.5.2)
65
+ sinatra (1.3.1)
66
+ rack (~> 1.3, >= 1.3.4)
67
+ rack-protection (~> 1.1, >= 1.1.2)
68
+ tilt (~> 1.3, >= 1.3.3)
69
+ tilt (1.3.3)
70
+ vegas (0.1.8)
71
+ rack (>= 1.0.0)
72
+
73
+ PLATFORMS
74
+ ruby
75
+
76
+ DEPENDENCIES
77
+ bundler
78
+ jeweler
79
+ minitest (>= 2)
80
+ resque (~> 1.19)
81
+ ruby-debug19
data/LICENSE.txt ADDED
@@ -0,0 +1,20 @@
1
+ Copyright (c) 2011 Aaron Quint
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining
4
+ a copy of this software and associated documentation files (the
5
+ "Software"), to deal in the Software without restriction, including
6
+ without limitation the rights to use, copy, modify, merge, publish,
7
+ distribute, sublicense, and/or sell copies of the Software, and to
8
+ permit persons to whom the Software is furnished to do so, subject to
9
+ the following conditions:
10
+
11
+ The above copyright notice and this permission notice shall be
12
+ included in all copies or substantial portions of the Software.
13
+
14
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.rdoc ADDED
@@ -0,0 +1,165 @@
+ = resque-metrics
+
+ A simple Resque plugin that times and saves some simple metrics for Resque jobs back into redis. On top of this
+ you could build a simple auto-scaling mechanism based on the speed and ETA of queues. It also includes a hook/callback
+ mechanism for recording/sending the metrics to your favorite tool (e.g. statsd/graphite).
+
+ == Installation
+
+   gem install resque-metrics
+
+ == Usage
+
+ Given a job, extend the job class with Resque::Metrics.
+
+   class SomeJob
+     extend ::Resque::Metrics
+
+     @queue = :jobs
+
+     def self.perform(x, y)
+       # sleep 10
+     end
+
+   end
+
+ By default this will record the total job count, the total count of jobs enqueued, the total time the jobs took, and the average time the jobs took.
+ It will also record the total number of job failures. These metrics are also tracked by queue and job class. So for the job above, it will record
+ values that you can fetch with module methods:
+
+   Resque::Metrics.total_job_count                    #=> 1
+   Resque::Metrics.total_job_count_by_job(SomeJob)    #=> 1
+   Resque::Metrics.total_job_count_by_queue(:jobs)    #=> 10000
+   Resque::Metrics.total_job_time                     #=> 10000
+   Resque::Metrics.total_job_time_by_job(SomeJob)     #=> 10000
+   Resque::Metrics.total_job_time_by_queue(:jobs)     #=> 10000
+   Resque::Metrics.avg_job_time                       #=> 1000
+   Resque::Metrics.avg_job_time_by_job(SomeJob)       #=> 1000
+   Resque::Metrics.avg_job_time_by_queue(:jobs)       #=> 1000
+   Resque::Metrics.failed_job_count                   #=> 1
+   Resque::Metrics.failed_job_count_by_job(SomeJob)   #=> 0
+   Resque::Metrics.failed_job_count_by_queue(:jobs)   #=> 0
+
+ All values are recorded and returned as integers. Times are in milliseconds.
+
+ === Forking Metrics
+
+ Resque::Metrics can also record forking metrics, but these are not on by default because `before_fork` and `after_fork` are singular hooks.
+ If you don't need to define your own fork hooks, you can simply add a line to an initializer:
+
+   Resque::Metrics.watch_fork
+
+ If you do define your own fork hooks:
+
+   Resque.before_fork do |job|
+     # my own fork code
+     Resque::Metrics.before_fork.call(job)
+   end
+
+   # Resque::Metrics.before_fork/after_fork just return lambdas, so you can also assign them directly
+   Resque.after_fork = Resque::Metrics.after_fork
+
+ Once enabled, this will record fork metrics, available through the `*_fork_*` methods like `avg_fork_time`, etc.
+ A recent version of Resque is required for fork recording to work.
+
+ === Queue Depth Metrics
+
+ Resque::Metrics can also record queue depth metrics. These are not on by default,
+ as they need to run on an interval to be useful. You can record them manually
+ by running in a console:
+
+   Resque::Metrics.record_depth
+
+ You could place this in a small script and run it from cron (see the sketch below). Once recorded, you'll have access to:
+
+   Resque::Metrics.failed_depth           #=> 1
+   Resque::Metrics.pending_depth          #=> 1
+   Resque::Metrics.depth_by_queue(:jobs)  #=> 1
+
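+ As an illustration, here is a minimal sketch of such a script (the file name, cron schedule, and redis address are made up for illustration):
+
+   #!/usr/bin/env ruby
+   # record_depth.rb -- run from cron, e.g. once a minute:
+   #   * * * * * /usr/bin/env ruby /path/to/record_depth.rb
+   require 'resque'
+   require 'resque/metrics'
+
+   # point Resque at your redis instance (adjust host/port as needed)
+   Resque.redis = 'localhost:6379'
+
+   # snapshot failed/pending depth and per-queue depth into the metrics backends
+   Resque::Metrics.record_depth
+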
+ === Metric Backends
+
+ By default, Resque::Metrics keeps all its metrics in Resque's redis instance, but it supports plugging in other backends. Resque::Metrics itself ships with redis and statsd backends. Here's how you would enable statsd:
+
+   # list current backends
+   Resque::Metrics.backends
+   # build your statsd instance
+   statsd = Statsd.new 'localhost', 8125
+   # add a Resque::Metrics::Backend
+   Resque::Metrics.backends << Resque::Metrics::Backends::Statsd.new(statsd)
+
+ ==== Statsd
+
+ If you already have a statsd object for your application, just pass it to Resque::Metrics::Backends::Statsd. The statsd client already supports namespacing, and in addition Resque::Metrics nests all its metrics under 'resque' within that namespace.
+
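+ For example, a sketch assuming the statsd-ruby client (which exposes a namespace accessor); the 'myapp' namespace is just illustrative:
+
+   statsd = Statsd.new('localhost', 8125)
+   statsd.namespace = 'myapp'
+   Resque::Metrics.backends << Resque::Metrics::Backends::Statsd.new(statsd)
+   # metrics then show up as e.g. myapp.resque.complete.count
+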
+ Here's a list of metrics emitted:
+
+   resque.job.<job>.complete.count
+   resque.job.<job>.complete.time
+   resque.queue.<queue>.complete.count
+   resque.queue.<queue>.complete.time
+   resque.complete.count
+   resque.complete.time
+
+   resque.job.<job>.enqueue.count
+   resque.job.<job>.enqueue.time
+   resque.queue.<queue>.enqueue.count
+   resque.queue.<queue>.enqueue.time
+   resque.enqueue.count
+   resque.enqueue.time
+
+   resque.job.<job>.fork.count
+   resque.job.<job>.fork.time
+   resque.queue.<queue>.fork.count
+   resque.queue.<queue>.fork.time
+   resque.fork.count
+   resque.fork.time
+
+   resque.job.<job>.failure.count
+   resque.queue.<queue>.failure.count
+   resque.failure.count
+
+   resque.depth.failed
+   resque.depth.pending
+   resque.depth.queue.<queue>
+
+ ==== Writing your own
+
+ To write your own backend, create a class and implement whichever of the following methods you care about:
+
+ * increment_metric(metric, by = 1)
+ * set_metric(metric, val)
+ * set_avg(metric, num, total)
+ * get_metric(metric)
+
+ Resque::Metrics will in turn call each of these methods on each of its backends,
+ if the backend responds_to? it. For get_metric, since it returns a value, only
+ the first backend that responds_to? it will be used.
+
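+ For example, here is a minimal sketch of a custom backend that just writes metrics to a Logger (the class name `LoggerBackend` and the log destination are made up for illustration):
+
+   require 'logger'
+
+   class LoggerBackend
+     def initialize(logger = Logger.new($stdout))
+       @logger = logger
+     end
+
+     # counters and timings both arrive here
+     def increment_metric(metric, by = 1)
+       @logger.info "resque metric #{metric} += #{by}"
+     end
+
+     # gauges, e.g. queue depth
+     def set_metric(metric, val)
+       @logger.info "resque metric #{metric} = #{val}"
+     end
+
+     # set_avg and get_metric are omitted on purpose: Resque::Metrics will skip
+     # this backend for those calls and use another backend (e.g. the default
+     # redis one) that responds_to? them
+   end
+
+   Resque::Metrics.backends << LoggerBackend.new
+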
+ === Callbacks/Hooks
+
+ Resque::Metrics also has a simple callback/hook system so you can send data to your favorite agent. All hooks are passed the job class,
+ the queue, and the value of the metric (the time in milliseconds, or the payload size for `on_job_enqueue`).
+
+   # Also `on_job_fork`, `on_job_enqueue`, and `on_job_failure` (`on_job_failure` does not include `time`)
+   Resque::Metrics.on_job_complete do |job_class, queue, time|
+     # send to your metrics agent
+     Statsd.timing "resque.#{job_class}.complete_time", time
+     Statsd.increment "resque.#{job_class}.complete"
+     # etc
+   end
+
+ == Contributing to resque-metrics
+
+ * Check out the latest master to make sure the feature hasn't been implemented or the bug hasn't been fixed yet
+ * Check out the issue tracker to make sure someone hasn't already requested it and/or contributed it
+ * Fork the project
+ * Start a feature/bugfix branch
+ * Commit and push until you are happy with your contribution
+ * Make sure to add tests for it. This is important so I don't break it in a future version unintentionally.
+ * Please try not to mess with the Rakefile, version, or history. If you want to have your own version, or it is otherwise necessary, that is fine, but please isolate the change to its own commit so I can cherry-pick around it.
+
+ == Copyright
+
+ Copyright (c) 2011 Aaron Quint. See LICENSE.txt for
+ further details.
+
data/Rakefile ADDED
@@ -0,0 +1,39 @@
1
+ # encoding: utf-8
2
+
3
+ require 'rubygems'
4
+ require 'bundler'
5
+ begin
6
+ Bundler.setup(:default, :development)
7
+ rescue Bundler::BundlerError => e
8
+ $stderr.puts e.message
9
+ $stderr.puts "Run `bundle install` to install missing gems"
10
+ exit e.status_code
11
+ end
12
+ require 'rake'
13
+
14
+ require 'jeweler'
15
+ Jeweler::Tasks.new do |gem|
16
+ # gem is a Gem::Specification... see http://docs.rubygems.org/read/chapter/20 for more options
17
+ gem.name = "resque-metrics"
18
+ gem.homepage = "http://github.com/quirkey/resque-metrics"
19
+ gem.license = "MIT"
20
+ gem.summary = %Q{A Resque plugin for recording simple metrics for your jobs}
21
+ gem.description = <<-desc
22
+ A simple Resque plugin that times and saves some simple metrics for Resque jobs back into redis. Based on this system
23
+ you could build some simple auto-scaling mechanism based on the speed and ETA of queues. Also includes a hook/callback
24
+ mechanism for recording/sending the metrics to your favorite tool (AKA statsd/graphite).
25
+ desc
26
+ gem.email = "aaron@quirkey.com"
27
+ gem.authors = ["Aaron Quint"]
28
+ # dependencies defined in Gemfile
29
+ end
30
+ Jeweler::RubygemsDotOrgTasks.new
31
+
32
+ require 'rake/testtask'
33
+ Rake::TestTask.new(:test) do |test|
34
+ test.libs << 'lib' << 'test'
35
+ test.pattern = 'test/**/test_*.rb'
36
+ test.verbose = true
37
+ end
38
+
39
+ task :default => :test
data/VERSION ADDED
@@ -0,0 +1 @@
+ 0.0.6
data/lib/resque-metrics.rb ADDED
@@ -0,0 +1 @@
+ require 'resque/metrics'
data/lib/resque/metrics.rb ADDED
@@ -0,0 +1,360 @@
1
+ require 'resque'
2
+ require 'resque/metrics/backends'
3
+
4
+ module Resque
5
+ module Metrics
6
+
7
+ def self.extended(klass)
8
+ klass.extend(Hooks)
9
+ end
10
+
11
+ def self.redis
12
+ @_redis ||= ::Resque.redis
13
+ end
14
+
15
+ def self.redis=(redis)
16
+ @_redis = redis
17
+ end
18
+
19
+ def self.use_multi=(multi)
20
+ @_use_multi = multi
21
+ end
22
+
23
+ def self.use_multi?
24
+ @_use_multi
25
+ end
26
+
27
+ def self.backends
28
+ @_backends ||= begin
29
+ self.backends = [Resque::Metrics::Backends::Redis.new(redis)]
30
+ end
31
+ end
32
+
33
+ def self.backends=(new_backends)
34
+ @_backends = new_backends
35
+ end
36
+
37
+ def self.run_backends(method, *args)
38
+ ran_any = false
39
+
40
+ backends.each do |backend|
41
+ if backend.respond_to?(method)
42
+ ran_any = true
43
+ backend.send method, *args
44
+ end
45
+ end
46
+
47
+ raise "No backend responded to #{method}: #{backends.inspect}" unless ran_any
48
+ end
49
+
50
+ def self.run_first_backend(method, *args)
51
+ backend = backends.detect {|backend| backend.respond_to?(method)}
52
+ raise "No backend responds to #{method}: #{backends.inspect}" unless backend
53
+
54
+ backend.send method, *args
55
+ end
56
+
57
+ def self.watch_fork
58
+ ::Resque.before_fork = before_fork
59
+ ::Resque.after_fork = after_fork
60
+ end
61
+
62
+ def self.on_job_fork(&block)
63
+ set_callback(:on_job_fork, &block)
64
+ end
65
+
66
+ def self.on_job_complete(&block)
67
+ set_callback(:on_job_complete, &block)
68
+ end
69
+
70
+ def self.on_job_enqueue(&block)
71
+ set_callback(:on_job_enqueue, &block)
72
+ end
73
+
74
+ def self.on_job_failure(&block)
75
+ set_callback(:on_job_failure, &block)
76
+ end
77
+
78
+ def self.set_callback(callback_name, &block)
79
+ @callbacks ||= {}
80
+ @callbacks[callback_name] ||= []
81
+ @callbacks[callback_name] << block
82
+ end
83
+
84
+ def self.run_callback(callback_name, *args)
85
+ if @callbacks && @callbacks[callback_name]
86
+ @callbacks[callback_name].each {|callback| callback.call(*args) }
87
+ end
88
+ end
89
+
90
+ def self.before_fork
91
+ lambda do |job|
92
+ start = Time.now.to_f * 1000
93
+ key = "_metrics_:fork_start:#{job.worker.to_s}"
94
+ ::Resque.redis.set key, start
95
+ ::Resque.redis.expire key, 60 * 60 * 60
96
+ true
97
+ end
98
+ end
99
+
100
+ def self.after_fork
101
+ lambda do |job|
102
+ end_time = Time.now.to_f * 1000
103
+ key = "_metrics_:fork_start:#{job.worker.to_s}"
104
+ start_time = ::Resque.redis.get key
105
+ if start_time
106
+ total = (end_time - start_time.to_f).to_i
107
+ ::Resque::Metrics.record_job_fork(job, total)
108
+ end
109
+ true
110
+ end
111
+ end
112
+
113
+ def self.record_depth
114
+ set_metric 'depth:failed', Resque::Failure.count
115
+ set_metric 'depth:pending', Resque.info[:pending]
116
+
117
+ Resque.queues.each do |queue|
118
+ set_metric "depth:queue:#{queue}", Resque.size(queue)
119
+ end
120
+
121
+ true
122
+ end
123
+
124
+ def self.record_job_fork(job, time)
125
+ job_class = job.payload_class
126
+ queue = job.queue
127
+ multi do
128
+ increment_metric "fork_time", time
129
+ increment_metric "fork_time:queue:#{queue}", time
130
+ increment_metric "fork_time:job:#{job_class}", time
131
+ increment_metric "fork_count"
132
+ increment_metric "fork_count:queue:#{queue}"
133
+ increment_metric "fork_count:job:#{job_class}"
134
+ end
135
+ set_avg "avg_fork_time", total_fork_time , total_fork_count
136
+ set_avg "avg_fork_time:queue:#{queue}", total_fork_time_by_queue(queue) , total_fork_count_by_queue(queue)
137
+ set_avg "avg_fork_time:job:#{job_class}", total_fork_time_by_job(job_class) , total_fork_count_by_job(job_class)
138
+ run_callback(:on_job_fork, job_class, queue, time)
139
+ end
140
+
141
+ def self.record_job_enqueue(job_class, *args)
142
+ queue = Resque.queue_from_class(job_class)
143
+ increment_metric "enqueue_count"
144
+ increment_metric "enqueue_count:job:#{job_class}"
145
+ increment_metric "enqueue_count:queue:#{queue}"
146
+
147
+ size = Resque.encode(args).length
148
+ multi do
149
+ increment_metric "payload_size", size
150
+ increment_metric "payload_size:queue:#{queue}", size
151
+ increment_metric "payload_size:job:#{job_class}", size
152
+ end
153
+ set_avg "avg_payload_size", total_payload_size , total_enqueue_count
154
+ set_avg "avg_payload_size:queue:#{queue}", total_payload_size_by_queue(queue) , total_enqueue_count_by_queue(queue)
155
+ set_avg "avg_payload_size:job:#{job_class}", total_payload_size_by_job(job_class) , total_enqueue_count_by_job(job_class)
156
+ run_callback(:on_job_enqueue, job_class, queue, size)
157
+ true
158
+ end
159
+
160
+ def self.record_job_completion(job_class, time)
161
+ queue = Resque.queue_from_class(job_class)
162
+ multi do
163
+ increment_metric "job_time", time
164
+ increment_metric "job_time:queue:#{queue}", time
165
+ increment_metric "job_time:job:#{job_class}", time
166
+ increment_metric "job_count"
167
+ increment_metric "job_count:queue:#{queue}"
168
+ increment_metric "job_count:job:#{job_class}"
169
+ end
170
+ set_avg "avg_job_time", total_job_time, total_job_count
171
+ set_avg "avg_job_time:queue:#{queue}", total_job_time_by_queue(queue) , total_job_count_by_queue(queue)
172
+ set_avg "avg_job_time:job:#{job_class}", total_job_time_by_job(job_class) , total_job_count_by_job(job_class)
173
+ run_callback(:on_job_complete, job_class, queue, time)
174
+ end
175
+
176
+ def self.record_job_failure(job_class, e)
177
+ queue = Resque.queue_from_class(job_class)
178
+
179
+ multi do
180
+ increment_metric "failed_job_count"
181
+ increment_metric "failed_job_count:queue:#{queue}"
182
+ increment_metric "failed_job_count:job:#{job_class}"
183
+ end
184
+
185
+ run_callback(:on_job_failure, job_class, queue)
186
+ end
187
+
188
+ def self.multi(&block)
189
+ use_multi? ? redis.multi(&block) : yield
190
+ end
191
+
192
+ def self.increment_metric(metric, by = 1)
193
+ run_backends(:increment_metric, metric, by)
194
+ end
195
+
196
+ def self.set_metric(metric, val)
197
+ run_backends(:set_metric, metric, val)
198
+ end
199
+
200
+ def self.set_avg(metric, num, total)
201
+ run_backends(:set_avg, metric, num, total)
202
+ end
203
+
204
+ def self.get_metric(metric)
205
+ run_first_backend(:get_metric, metric)
206
+ end
207
+
208
+ def self.total_enqueue_count
209
+ get_metric "enqueue_count"
210
+ end
211
+
212
+ def self.total_enqueue_count_by_job(job)
213
+ get_metric "enqueue_count:job:#{job}"
214
+ end
215
+
216
+ def self.total_enqueue_count_by_queue(queue)
217
+ get_metric "enqueue_count:queue:#{queue}"
218
+ end
219
+
220
+ def self.avg_job_time
221
+ get_metric "avg_job_time"
222
+ end
223
+
224
+ def self.avg_job_time_by_queue(queue)
225
+ get_metric "avg_job_time:queue:#{queue}"
226
+ end
227
+
228
+ def self.avg_job_time_by_job(job)
229
+ get_metric "avg_job_time:job:#{job}"
230
+ end
231
+
232
+ def self.total_job_time
233
+ get_metric "job_time"
234
+ end
235
+
236
+ def self.total_job_time_by_queue(queue)
237
+ get_metric "job_time:queue:#{queue}"
238
+ end
239
+
240
+ def self.total_job_time_by_job(job)
241
+ get_metric "job_time:job:#{job}"
242
+ end
243
+
244
+ def self.total_job_count
245
+ get_metric "job_count"
246
+ end
247
+
248
+ def self.total_job_count_by_queue(queue)
249
+ get_metric "job_count:queue:#{queue}"
250
+ end
251
+
252
+ def self.total_job_count_by_job(job)
253
+ get_metric "job_count:job:#{job}"
254
+ end
255
+
256
+ def self.failed_job_count
257
+ get_metric "failed_job_count"
258
+ end
259
+
260
+ def self.failed_job_count_by_queue(queue)
261
+ get_metric "failed_job_count:queue:#{queue}"
262
+ end
263
+
264
+ def self.failed_job_count_by_job(job)
265
+ get_metric "failed_job_count:job:#{job}"
266
+ end
267
+
268
+ def self.total_payload_size
269
+ get_metric "payload_size"
270
+ end
271
+
272
+ def self.total_payload_size_by_queue(queue)
273
+ get_metric "payload_size:queue:#{queue}"
274
+ end
275
+
276
+ def self.total_payload_size_by_job(job)
277
+ get_metric "payload_size:job:#{job}"
278
+ end
279
+
280
+ def self.avg_payload_size
281
+ get_metric "avg_payload_size"
282
+ end
283
+
284
+ def self.avg_payload_size_by_queue(queue)
285
+ get_metric "avg_payload_size:queue:#{queue}"
286
+ end
287
+
288
+ def self.avg_payload_size_by_job(job)
289
+ get_metric "avg_payload_size:job:#{job}"
290
+ end
291
+
292
+ def self.avg_fork_time
293
+ get_metric "avg_fork_time"
294
+ end
295
+
296
+ def self.avg_fork_time_by_queue(queue)
297
+ get_metric "avg_fork_time:queue:#{queue}"
298
+ end
299
+
300
+ def self.avg_fork_time_by_job(job)
301
+ get_metric "avg_fork_time:job:#{job}"
302
+ end
303
+
304
+ def self.total_fork_time
305
+ get_metric "fork_time"
306
+ end
307
+
308
+ def self.total_fork_time_by_queue(queue)
309
+ get_metric "fork_time:queue:#{queue}"
310
+ end
311
+
312
+ def self.total_fork_time_by_job(job)
313
+ get_metric "fork_time:job:#{job}"
314
+ end
315
+
316
+ def self.total_fork_count
317
+ get_metric "fork_count"
318
+ end
319
+
320
+ def self.total_fork_count_by_queue(queue)
321
+ get_metric "fork_count:queue:#{queue}"
322
+ end
323
+
324
+ def self.total_fork_count_by_job(job)
325
+ get_metric "fork_count:job:#{job}"
326
+ end
327
+
328
+ def self.failed_depth
329
+ get_metric "depth:failed"
330
+ end
331
+
332
+ def self.pending_depth
333
+ get_metric "depth:pending"
334
+ end
335
+
336
+ def self.depth_by_queue(queue)
337
+ get_metric "depth:queue:#{queue}"
338
+ end
339
+
340
+ module Hooks
341
+
342
+ def after_enqueue_metrics(*args)
343
+ Resque::Metrics.record_job_enqueue(self, *args)
344
+ end
345
+
346
+ def around_perform_metrics(*args)
347
+ start = Time.now
348
+ yield
349
+ finish = ((Time.now.to_f - start.to_f) * 1000).to_i
350
+ Resque::Metrics.record_job_completion(self, finish)
351
+ end
352
+
353
+ def on_failure_metrics(e, *args)
354
+ Resque::Metrics.record_job_failure(self, e)
355
+ end
356
+
357
+ end
358
+
359
+ end
360
+ end
data/lib/resque/metrics/backends.rb ADDED
@@ -0,0 +1,2 @@
+ require 'resque/metrics/backends/redis'
+ require 'resque/metrics/backends/statsd'
data/lib/resque/metrics/backends/redis.rb ADDED
@@ -0,0 +1,31 @@
+ module Resque
+   module Metrics
+     module Backends
+       class Redis
+         attr_accessor :redis
+
+         def initialize(redis)
+           @redis = redis
+         end
+
+         def increment_metric(metric, by = 1)
+           redis.incrby("_metrics_:#{metric}", by)
+         end
+
+         def set_metric(metric, val)
+           redis.set("_metrics_:#{metric}", val)
+         end
+
+         def set_avg(metric, num, total)
+           val = total < 1 ? 0 : num / total
+           set_metric(metric, val)
+         end
+
+         def get_metric(metric)
+           redis.get("_metrics_:#{metric}").to_i
+         end
+
+       end
+     end
+   end
+ end
data/lib/resque/metrics/backends/statsd.rb ADDED
@@ -0,0 +1,66 @@
1
+ module Resque
2
+ module Metrics
3
+ module Backends
4
+ class Statsd
5
+ attr_accessor :statsd, :metric_prefix
6
+
7
+ def initialize(statsd, metric_prefix = 'resque')
8
+ @statsd = statsd
9
+ @metric_prefix = metric_prefix
10
+ end
11
+
12
+ def increment_metric(metric, by = 1)
13
+ if metric =~ /^(.+)(?:_job)?_(time|count)(?::(queue|job):(.*))?$/
14
+ event = $1
15
+ event = 'complete' if event == 'job'
16
+
17
+ time_or_count = $2
18
+ queue_or_job = $3
19
+ queue_or_job_name = $4
20
+ key = if queue_or_job && queue_or_job_name
21
+ # ie resque.complete.queue.high.count, resque.failed.job.Index.timing
22
+ "#{metric_prefix}.#{event}.#{queue_or_job}.#{queue_or_job_name}.#{time_or_count}"
23
+ else
24
+
25
+ # ie resque.complete.time
26
+ "#{metric_prefix}.#{event}.#{time_or_count}"
27
+ end
28
+ case time_or_count
29
+ when 'time'
30
+ statsd.timing key, by
31
+ when 'count'
32
+ statsd.increment key, by
33
+ else
34
+ raise "Not sure how to increment_metric for a #{time_or_count} metric (#{metric})"
35
+ end
36
+ elsif metric =~ /^payload_size(?::(queue|job):(.*))?$/
37
+ queue_or_job = $1
38
+ queue_or_job_name = $2
39
+ key = if queue_or_job && queue_or_job_name
40
+ # ie resque.complete.queue.high.count, resque.failed.job.Index.timing
41
+ "#{metric_prefix}.payload_size.#{queue_or_job}.#{queue_or_job_name}.#{time_or_count}"
42
+ else
43
+ "#{metric_prefix}.payload_size.#{time_or_count}"
44
+ end
45
+ statsd.increment key, by
46
+ else
47
+ raise "Not sure how to increment_metric #{metric}"
48
+ end
49
+ end
50
+
51
+ def set_metric(metric, val)
52
+ if metric =~ /^depth(?::(failed|pending|queue)(?::(.+))?)?$/
53
+ key = "#{metric_prefix}.#{metric.gsub(':', '.')}"
54
+ statsd.gauge key, val
55
+ else
56
+ raise "Not sure how to set_metric #{metric}"
57
+ end
58
+ end
59
+
60
+ # set_avg: let statsd & graphite handle that
61
+ # get_metric: would have to talk to graphite. but man, complicated
62
+ end
63
+ end
64
+ end
65
+ end
66
+
data/resque-metrics.gemspec ADDED
@@ -0,0 +1,67 @@
1
+ # Generated by jeweler
2
+ # DO NOT EDIT THIS FILE DIRECTLY
3
+ # Instead, edit Jeweler::Tasks in Rakefile, and run 'rake gemspec'
4
+ # -*- encoding: utf-8 -*-
5
+ # stub: resque-metrics 0.0.6 ruby lib
6
+
7
+ Gem::Specification.new do |s|
8
+ s.name = "vinted-resque-metrics"
9
+ s.version = "0.0.7"
10
+
11
+ s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
12
+ s.require_paths = ["lib"]
13
+ s.authors = ["Aaron Quint", "Tomas Varaneckas"]
14
+ s.date = "2014-02-12"
15
+ s.description = "A simple Resque plugin that times and saves some simple metrics for Resque jobs back into redis. Based on this system\nyou could build some simple auto-scaling mechanism based on the speed and ETA of queues. Also includes a hook/callback\nmechanism for recording/sending the metrics to your favorite tool (AKA statsd/graphite).\n"
16
+ s.email = "aaron@quirkey.com"
17
+ s.extra_rdoc_files = [
18
+ "LICENSE.txt",
19
+ "README.rdoc"
20
+ ]
21
+ s.files = [
22
+ ".document",
23
+ "Gemfile",
24
+ "Gemfile.lock",
25
+ "LICENSE.txt",
26
+ "README.rdoc",
27
+ "Rakefile",
28
+ "VERSION",
29
+ "lib/resque-metrics.rb",
30
+ "lib/resque/metrics.rb",
31
+ "lib/resque/metrics/backends.rb",
32
+ "lib/resque/metrics/backends/redis.rb",
33
+ "lib/resque/metrics/backends/statsd.rb",
34
+ "resque-metrics.gemspec",
35
+ "test/helper.rb",
36
+ "test/redis-test.conf",
37
+ "test/test_resque-metrics.rb"
38
+ ]
39
+ s.homepage = "http://github.com/quirkey/resque-metrics"
40
+ s.licenses = ["MIT"]
41
+ s.rubygems_version = "2.2.1"
42
+ s.summary = "A Resque plugin for recording simple metrics for your jobs"
43
+
44
+ if s.respond_to? :specification_version then
45
+ s.specification_version = 4
46
+
47
+ if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
48
+ s.add_runtime_dependency(%q<resque>, ["~> 1.19"])
49
+ s.add_development_dependency(%q<ruby-debug19>, [">= 0"])
50
+ s.add_development_dependency(%q<minitest>, [">= 2"])
51
+ s.add_development_dependency(%q<bundler>, [">= 0"])
52
+ s.add_development_dependency(%q<jeweler>, [">= 0"])
53
+ else
54
+ s.add_dependency(%q<resque>, ["~> 1.19"])
55
+ s.add_dependency(%q<ruby-debug19>, [">= 0"])
56
+ s.add_dependency(%q<minitest>, [">= 2"])
57
+ s.add_dependency(%q<bundler>, [">= 0"])
58
+ s.add_dependency(%q<jeweler>, [">= 0"])
59
+ end
60
+ else
61
+ s.add_dependency(%q<resque>, ["~> 1.19"])
62
+ s.add_dependency(%q<ruby-debug19>, [">= 0"])
63
+ s.add_dependency(%q<minitest>, [">= 2"])
64
+ s.add_dependency(%q<bundler>, [">= 0"])
65
+ s.add_dependency(%q<jeweler>, [">= 0"])
66
+ end
67
+ end
data/test/helper.rb ADDED
@@ -0,0 +1,79 @@
1
+ dir = File.dirname(File.expand_path(__FILE__))
2
+ require 'rubygems'
3
+ require 'bundler'
4
+ begin
5
+ Bundler.setup(:default, :development)
6
+ rescue Bundler::BundlerError => e
7
+ $stderr.puts e.message
8
+ $stderr.puts "Run `bundle install` to install missing gems"
9
+ exit e.status_code
10
+ end
11
+ require 'minitest/autorun'
12
+
13
+ $LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
14
+ $LOAD_PATH.unshift(File.dirname(__FILE__))
15
+ $TESTING = true
16
+
17
+ require 'resque/metrics'
18
+
19
+ #
20
+ # make sure we can run redis
21
+ #
22
+
23
+ if ENV['BOXEN_REDIS_PORT']
24
+ redis_port = ENV['BOXEN_REDIS_PORT']
25
+ else
26
+ if !system("which redis-server")
27
+ puts '', "** can't find `redis-server` in your path"
28
+ puts "** try running `sudo rake install`"
29
+ abort ''
30
+ end
31
+
32
+ #
33
+ # start our own redis when the tests start,
34
+ # kill it when they end
35
+ #
36
+
37
+ at_exit do
38
+ next if $!
39
+
40
+ if defined?(MiniTest)
41
+ exit_code = MiniTest::Unit.new.run(ARGV)
42
+ else
43
+ exit_code = Test::Unit::AutoRunner.run
44
+ end
45
+
46
+ pid = `ps -e -o pid,command | grep [r]edis-test`.split(" ")[0]
47
+ puts "Killing test redis server..."
48
+ Process.kill("KILL", pid.to_i)
49
+ FileUtils.rm_rf("#{dir}/dump.rdb")
50
+ exit exit_code
51
+ end
52
+
53
+ puts "Starting redis for testing at localhost:9736..."
54
+ `redis-server #{dir}/redis-test.conf`
55
+
56
+ redis_port = 9736
57
+ end
58
+ Resque.redis = "localhost:#{redis_port}:2"
59
+
60
+ class SomeJob
61
+ extend Resque::Metrics
62
+
63
+ @queue = :jobs
64
+
65
+ def self.perform(x, y)
66
+ sleep rand * 0.01
67
+ end
68
+
69
+ end
70
+
71
+ class FailureJob
72
+ extend Resque::Metrics
73
+
74
+ @queue = :jobs
75
+
76
+ def self.perform
77
+ raise "failing lol"
78
+ end
79
+ end
data/test/redis-test.conf ADDED
@@ -0,0 +1,130 @@
1
+ # Redis configuration file example
2
+
3
+ # By default Redis does not run as a daemon. Use 'yes' if you need it.
4
+ # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
5
+ daemonize yes
6
+
7
+ # When run as a daemon, Redis write a pid file in /var/run/redis.pid by default.
8
+ # You can specify a custom pid file location here.
9
+ pidfile ./test/redis-test.pid
10
+
11
+ # Accept connections on the specified port, default is 6379
12
+ port 9736
13
+
14
+ # If you want you can bind a single interface, if the bind option is not
15
+ # specified all the interfaces will listen for connections.
16
+ #
17
+ # bind 127.0.0.1
18
+
19
+ # Close the connection after a client is idle for N seconds (0 to disable)
20
+ timeout 300
21
+
22
+ # Save the DB on disk:
23
+ #
24
+ # save <seconds> <changes>
25
+ #
26
+ # Will save the DB if both the given number of seconds and the given
27
+ # number of write operations against the DB occurred.
28
+ #
29
+ # In the example below the behaviour will be to save:
30
+ # after 900 sec (15 min) if at least 1 key changed
31
+ # after 300 sec (5 min) if at least 10 keys changed
32
+ # after 60 sec if at least 10000 keys changed
33
+ save 900 1
34
+ save 300 10
35
+ save 60 10000
36
+
37
+ # The filename where to dump the DB
38
+ dbfilename dump.rdb
39
+
40
+ # For default save/load DB in/from the working directory
41
+ # Note that you must specify a directory not a file name.
42
+ dir ./test/
43
+
44
+ # Set server verbosity to 'debug'
45
+ # it can be one of:
46
+ # debug (a lot of information, useful for development/testing)
47
+ # notice (moderately verbose, what you want in production probably)
48
+ # warning (only very important / critical messages are logged)
49
+ loglevel debug
50
+
51
+ # Specify the log file name. Also 'stdout' can be used to force
52
+ # the demon to log on the standard output. Note that if you use standard
53
+ # output for logging but daemonize, logs will be sent to /dev/null
54
+ logfile stdout
55
+
56
+ # Set the number of databases. The default database is DB 0, you can select
57
+ # a different one on a per-connection basis using SELECT <dbid> where
58
+ # dbid is a number between 0 and 'databases'-1
59
+ databases 16
60
+
61
+ ################################# REPLICATION #################################
62
+
63
+ # Master-Slave replication. Use slaveof to make a Redis instance a copy of
64
+ # another Redis server. Note that the configuration is local to the slave
65
+ # so for example it is possible to configure the slave to save the DB with a
66
+ # different interval, or to listen to another port, and so on.
67
+
68
+ # slaveof <masterip> <masterport>
69
+
70
+ ################################## SECURITY ###################################
71
+
72
+ # Require clients to issue AUTH <PASSWORD> before processing any other
73
+ # commands. This might be useful in environments in which you do not trust
74
+ # others with access to the host running redis-server.
75
+ #
76
+ # This should stay commented out for backward compatibility and because most
77
+ # people do not need auth (e.g. they run their own servers).
78
+
79
+ # requirepass foobared
80
+
81
+ ################################### LIMITS ####################################
82
+
83
+ # Set the max number of connected clients at the same time. By default there
84
+ # is no limit, and it's up to the number of file descriptors the Redis process
85
+ is able to open. The special value '0' means no limits.
86
+ # Once the limit is reached Redis will close all the new connections sending
87
+ # an error 'max number of clients reached'.
88
+
89
+ # maxclients 128
90
+
91
+ # Don't use more memory than the specified amount of bytes.
92
+ # When the memory limit is reached Redis will try to remove keys with an
93
+ # EXPIRE set. It will try to start freeing keys that are going to expire
94
+ # in little time and preserve keys with a longer time to live.
95
+ # Redis will also try to remove objects from free lists if possible.
96
+ #
97
+ # If all this fails, Redis will start to reply with errors to commands
98
+ # that will use more memory, like SET, LPUSH, and so on, and will continue
99
+ # to reply to most read-only commands like GET.
100
+ #
101
+ # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
102
+ # 'state' server or cache, not as a real DB. When Redis is used as a real
103
+ # database the memory usage will grow over the weeks, it will be obvious if
104
+ # it is going to use too much memory in the long run, and you'll have the time
105
+ # to upgrade. With maxmemory after the limit is reached you'll start to get
106
+ # errors for write operations, and this may even lead to DB inconsistency.
107
+
108
+ # maxmemory <bytes>
109
+
110
+ ############################### ADVANCED CONFIG ###############################
111
+
112
+ # Glue small output buffers together in order to send small replies in a
113
+ # single TCP packet. Uses a bit more CPU but most of the times it is a win
114
+ # in terms of number of queries per second. Use 'yes' if unsure.
115
+ glueoutputbuf yes
116
+
117
+ # Use object sharing. Can save a lot of memory if you have many common
118
+ # string in your dataset, but performs lookups against the shared objects
119
+ # pool so it uses more CPU and can be a bit slower. Usually it's a good
120
+ # idea.
121
+ #
122
+ # When object sharing is enabled (shareobjects yes) you can use
123
+ # shareobjectspoolsize to control the size of the pool used in order to try
124
+ # object sharing. A bigger pool size will lead to better sharing capabilities.
125
+ # In general you want this value to be at least the double of the number of
126
+ # very common strings you have in your dataset.
127
+ #
128
+ # WARNING: object sharing is experimental, don't enable this feature
129
+ # in production before of Redis 1.0-stable. Still please try this feature in
130
+ # your development environment so that we can test it better.
data/test/test_resque-metrics.rb ADDED
@@ -0,0 +1,120 @@
1
+ require 'helper'
2
+
3
+ class TestResqueMetrics < MiniTest::Unit::TestCase
4
+ def setup
5
+ Resque.redis.flushall
6
+
7
+ Resque.before_first_fork = nil
8
+ Resque.before_fork = nil
9
+ Resque.after_fork = nil
10
+
11
+ @num_jobs = 4
12
+ @worker = Resque::Worker.new(:jobs)
13
+ @num_jobs.times do
14
+ work_job
15
+ end
16
+
17
+ @num_failed_jobs = 2
18
+ @num_failed_jobs.times do
19
+ fail_job
20
+ end
21
+ end
22
+
23
+ def test_should_pass_resque_plugin_lint
24
+ assert Resque::Plugin.lint(Resque::Metrics::Hooks)
25
+ end
26
+
27
+ def test_should_perform_job
28
+ assert Resque::Job.new(:jobs, 'class' => SomeJob, 'args' => [1,2]).perform
29
+ end
30
+
31
+ def test_should_record_total_job_time
32
+ assert Resque::Metrics.total_job_time > 0
33
+ assert Resque::Metrics.total_job_time_by_queue(:jobs) > 0
34
+ assert Resque::Metrics.total_job_time_by_job(SomeJob) > 0
35
+ end
36
+
37
+ def test_should_record_enqueue_count
38
+ assert_equal @num_jobs + @num_failed_jobs, Resque::Metrics.total_enqueue_count
39
+ assert_equal @num_jobs + @num_failed_jobs, Resque::Metrics.total_enqueue_count_by_queue(:jobs)
40
+ assert_equal @num_jobs, Resque::Metrics.total_enqueue_count_by_job(SomeJob)
41
+ assert_equal @num_failed_jobs, Resque::Metrics.total_enqueue_count_by_job(FailureJob)
42
+ end
43
+
44
+ def test_should_record_job_count
45
+ assert Resque::Metrics.total_job_count > 0
46
+ assert Resque::Metrics.total_job_count_by_queue(:jobs) > 0
47
+ assert Resque::Metrics.total_job_count_by_job(SomeJob) > 0
48
+ end
49
+
50
+ def test_should_record_failed_job_count
51
+ assert Resque::Metrics.failed_job_count > 0, "Expected #{Resque::Metrics.failed_job_count} to be > 0, but wasn't"
52
+ assert Resque::Metrics.failed_job_count_by_queue(:jobs) > 0
53
+ assert Resque::Metrics.failed_job_count_by_job(FailureJob) > 0
54
+ end
55
+
56
+ def test_should_record_payload_size
57
+ assert Resque::Metrics.total_payload_size > 0
58
+ assert Resque::Metrics.total_payload_size_by_queue(:jobs) > 0
59
+ assert Resque::Metrics.total_payload_size_by_job(SomeJob) > 0
60
+ end
61
+
62
+ def test_should_record_avg_job_time
63
+ assert Resque::Metrics.avg_job_time > 0
64
+ assert Resque::Metrics.avg_job_time_by_queue(:jobs) > 0
65
+ assert Resque::Metrics.avg_job_time_by_job(SomeJob) > 0
66
+ end
67
+
68
+ def test_should_call_job_complete_callbacks
69
+ recorded = []
70
+ recorded_count = 0
71
+ Resque::Metrics.on_job_complete do |klass, queue, time|
72
+ recorded << {:klass => klass, :queue => queue, :time => time }
73
+ end
74
+ Resque::Metrics.on_job_complete do |klass, queue, time|
75
+ recorded_count += 1
76
+ end
77
+
78
+ work_job
79
+ work_job
80
+
81
+ assert_equal 2, recorded.length
82
+ assert_equal SomeJob, recorded[0][:klass]
83
+ assert_equal :jobs, recorded[0][:queue]
84
+ assert recorded[0][:time] > 0, "Expected #{recorded[0][:time]} to be > 0, but wasn't"
85
+ assert_equal 2, recorded_count
86
+ end
87
+
88
+ def test_should_call_job_failure_callbacks
89
+ recorded = []
90
+ recorded_count = 0
91
+ Resque::Metrics.on_job_failure do |klass, queue|
92
+ recorded << {:klass => klass, :queue => queue}
93
+ end
94
+ Resque::Metrics.on_job_failure do |klass, queue|
95
+ recorded_count += 1
96
+ end
97
+
98
+ fail_job
99
+ fail_job
100
+ fail_job
101
+
102
+ assert_equal 3, recorded.length
103
+ assert_equal FailureJob, recorded[0][:klass]
104
+ assert_equal :jobs, recorded[0][:queue]
105
+ assert_equal 3, recorded_count
106
+ end
107
+
108
+ private
109
+
110
+ def work_job
111
+ Resque.enqueue(SomeJob, 20, '/tmp')
112
+ @worker.work(0)
113
+ end
114
+
115
+ def fail_job
116
+ Resque.enqueue(FailureJob)
117
+ @worker.work(0)
118
+ end
119
+
120
+ end
metadata ADDED
@@ -0,0 +1,135 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: vinted-resque-metrics
3
+ version: !ruby/object:Gem::Version
4
+ version: 0.0.7
5
+ platform: ruby
6
+ authors:
7
+ - Aaron Quint
8
+ - Tomas Varaneckas
9
+ autorequire:
10
+ bindir: bin
11
+ cert_chain: []
12
+ date: 2014-02-12 00:00:00.000000000 Z
13
+ dependencies:
14
+ - !ruby/object:Gem::Dependency
15
+ name: resque
16
+ requirement: !ruby/object:Gem::Requirement
17
+ requirements:
18
+ - - "~>"
19
+ - !ruby/object:Gem::Version
20
+ version: '1.19'
21
+ type: :runtime
22
+ prerelease: false
23
+ version_requirements: !ruby/object:Gem::Requirement
24
+ requirements:
25
+ - - "~>"
26
+ - !ruby/object:Gem::Version
27
+ version: '1.19'
28
+ - !ruby/object:Gem::Dependency
29
+ name: ruby-debug19
30
+ requirement: !ruby/object:Gem::Requirement
31
+ requirements:
32
+ - - ">="
33
+ - !ruby/object:Gem::Version
34
+ version: '0'
35
+ type: :development
36
+ prerelease: false
37
+ version_requirements: !ruby/object:Gem::Requirement
38
+ requirements:
39
+ - - ">="
40
+ - !ruby/object:Gem::Version
41
+ version: '0'
42
+ - !ruby/object:Gem::Dependency
43
+ name: minitest
44
+ requirement: !ruby/object:Gem::Requirement
45
+ requirements:
46
+ - - ">="
47
+ - !ruby/object:Gem::Version
48
+ version: '2'
49
+ type: :development
50
+ prerelease: false
51
+ version_requirements: !ruby/object:Gem::Requirement
52
+ requirements:
53
+ - - ">="
54
+ - !ruby/object:Gem::Version
55
+ version: '2'
56
+ - !ruby/object:Gem::Dependency
57
+ name: bundler
58
+ requirement: !ruby/object:Gem::Requirement
59
+ requirements:
60
+ - - ">="
61
+ - !ruby/object:Gem::Version
62
+ version: '0'
63
+ type: :development
64
+ prerelease: false
65
+ version_requirements: !ruby/object:Gem::Requirement
66
+ requirements:
67
+ - - ">="
68
+ - !ruby/object:Gem::Version
69
+ version: '0'
70
+ - !ruby/object:Gem::Dependency
71
+ name: jeweler
72
+ requirement: !ruby/object:Gem::Requirement
73
+ requirements:
74
+ - - ">="
75
+ - !ruby/object:Gem::Version
76
+ version: '0'
77
+ type: :development
78
+ prerelease: false
79
+ version_requirements: !ruby/object:Gem::Requirement
80
+ requirements:
81
+ - - ">="
82
+ - !ruby/object:Gem::Version
83
+ version: '0'
84
+ description: |
85
+ A simple Resque plugin that times and saves some simple metrics for Resque jobs back into redis. Based on this system
86
+ you could build some simple auto-scaling mechanism based on the speed and ETA of queues. Also includes a hook/callback
87
+ mechanism for recording/sending the metrics to your favorite tool (AKA statsd/graphite).
88
+ email: aaron@quirkey.com
89
+ executables: []
90
+ extensions: []
91
+ extra_rdoc_files:
92
+ - LICENSE.txt
93
+ - README.rdoc
94
+ files:
95
+ - ".document"
96
+ - Gemfile
97
+ - Gemfile.lock
98
+ - LICENSE.txt
99
+ - README.rdoc
100
+ - Rakefile
101
+ - VERSION
102
+ - lib/resque-metrics.rb
103
+ - lib/resque/metrics.rb
104
+ - lib/resque/metrics/backends.rb
105
+ - lib/resque/metrics/backends/redis.rb
106
+ - lib/resque/metrics/backends/statsd.rb
107
+ - resque-metrics.gemspec
108
+ - test/helper.rb
109
+ - test/redis-test.conf
110
+ - test/test_resque-metrics.rb
111
+ homepage: http://github.com/quirkey/resque-metrics
112
+ licenses:
113
+ - MIT
114
+ metadata: {}
115
+ post_install_message:
116
+ rdoc_options: []
117
+ require_paths:
118
+ - lib
119
+ required_ruby_version: !ruby/object:Gem::Requirement
120
+ requirements:
121
+ - - ">="
122
+ - !ruby/object:Gem::Version
123
+ version: '0'
124
+ required_rubygems_version: !ruby/object:Gem::Requirement
125
+ requirements:
126
+ - - ">="
127
+ - !ruby/object:Gem::Version
128
+ version: '0'
129
+ requirements: []
130
+ rubyforge_project:
131
+ rubygems_version: 2.2.2
132
+ signing_key:
133
+ specification_version: 4
134
+ summary: A Resque plugin for recording simple metrics for your jobs
135
+ test_files: []