resque-metrics 0.0.1

data/.document ADDED
@@ -0,0 +1,5 @@
+ lib/**/*.rb
+ bin/*
+ -
+ features/**/*.feature
+ LICENSE.txt
data/Gemfile ADDED
@@ -0,0 +1,17 @@
+ source "http://rubygems.org"
+
+ gem 'resque', '~>1.19'
+ # Add dependencies required to use your gem here.
+ # Example:
+ # gem "activesupport", ">= 2.3.5"
+
+ # Add dependencies to develop your gem here.
+ # Include everything needed to run rake, tests, features, etc.
+ group :development do
+   platform :mri_19 do
+     gem "ruby-debug19"
+   end
+   gem 'minitest', '>=2'
+   gem "bundler", "~> 1.0.0"
+   gem "jeweler", "~> 1.6.2"
+ end
data/Gemfile.lock ADDED
@@ -0,0 +1,50 @@
+ GEM
+   remote: http://rubygems.org/
+   specs:
+     archive-tar-minitar (0.5.2)
+     columnize (0.3.4)
+     git (1.2.5)
+     jeweler (1.6.2)
+       bundler (~> 1.0)
+       git (>= 1.2.5)
+       rake
+     linecache19 (0.5.12)
+       ruby_core_source (>= 0.1.4)
+     minitest (2.3.1)
+     multi_json (1.0.3)
+     rack (1.3.3)
+     rake (0.9.2)
+     redis (2.2.0)
+     redis-namespace (1.0.3)
+       redis (< 3.0.0)
+     resque (1.19.0.1)
+       multi_json (~> 1.0)
+       redis-namespace (~> 1.0.2)
+       sinatra (>= 0.9.2)
+       vegas (~> 0.1.2)
+     ruby-debug-base19 (0.11.25)
+       columnize (>= 0.3.1)
+       linecache19 (>= 0.5.11)
+       ruby_core_source (>= 0.1.4)
+     ruby-debug19 (0.11.6)
+       columnize (>= 0.3.1)
+       linecache19 (>= 0.5.11)
+       ruby-debug-base19 (>= 0.11.19)
+     ruby_core_source (0.1.5)
+       archive-tar-minitar (>= 0.5.2)
+     sinatra (1.2.7)
+       rack (~> 1.1)
+       tilt (< 2.0, >= 1.2.2)
+     tilt (1.3.3)
+     vegas (0.1.8)
+       rack (>= 1.0.0)
+
+ PLATFORMS
+   ruby
+
+ DEPENDENCIES
+   bundler (~> 1.0.0)
+   jeweler (~> 1.6.2)
+   minitest (>= 2)
+   resque (~> 1.19)
+   ruby-debug19
data/LICENSE.txt ADDED
@@ -0,0 +1,20 @@
+ Copyright (c) 2011 Aaron Quint
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.rdoc ADDED
@@ -0,0 +1,89 @@
+ = resque-metrics
+
+ A simple Resque plugin that times and saves some simple metrics for Resque jobs back into redis. Based on this system
+ you could build some simple auto-scaling mechanism based on the speed and ETA of queues. Also includes a hook/callback
+ mechanism for recording/sending the metrics to your favorite tool (AKA statsd/graphite).
+
+ == Installation
+
+     gem install resque-metrics
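+
+ Or, if your app uses Bundler, add it to your Gemfile (a minimal sketch; pin whatever version you need):
+
+     # Gemfile
+     gem 'resque-metrics'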
+
+ == Usage
+
+ Given a job, extend the job class with Resque::Metrics.
+
+     class SomeJob
+       extend ::Resque::Metrics
+
+       @queue = :jobs
+
+       def self.perform(x, y)
+         # sleep 10
+       end
+
+     end
+
+ By default this will record the total job count, the total count of jobs enqueued, the total time the jobs took, and the avg time the jobs took. It
+ will also record each of these per-queue and per-job class. So for the job above it will record values that you can fetch
+ with module methods:
+
+     Resque::Metrics.total_job_count #=> 1
+     Resque::Metrics.total_job_count_by_job(SomeJob) #=> 1
+     Resque::Metrics.total_job_count_by_queue(:jobs) #=> 10000
+     Resque::Metrics.total_job_time #=> 10000
+     Resque::Metrics.total_job_time_by_job(SomeJob) #=> 10000
+     Resque::Metrics.total_job_time_by_queue(:jobs) #=> 10000
+     Resque::Metrics.avg_job_time #=> 1000
+     Resque::Metrics.avg_job_time_by_job(SomeJob) #=> 1000
+     Resque::Metrics.avg_job_time_by_queue(:jobs) #=> 1000
+
+ All values are recorded and returned as integers. For times, values are in milliseconds.
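+
+ As a rough illustration of the auto-scaling idea mentioned above (not an API the gem provides), you could combine these numbers with Resque's queue size to estimate how long a queue will take to drain:
+
+     # hypothetical sketch: rough ETA for the :jobs queue, in milliseconds,
+     # assuming a single worker draining the queue serially
+     eta_ms = Resque.size(:jobs) * Resque::Metrics.avg_job_time_by_queue(:jobs)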
+
+ === Forking Metrics
+
+ Resque::Metrics can also record forking metrics, but these are not on by default as `before_fork` and `after_fork` are singular hooks.
+ If you don't need to define your own fork hooks you can simply add a line to an initializer:
+
+     Resque::Metrics.watch_fork
+
+ If you do define your own fork hooks:
+
+     Resque.before_fork do |job|
+       # my own fork code
+       Resque::Metrics.before_fork.call(job)
+     end
+
+     # Resque::Metrics.before_fork and .after_fork just return lambdas, so you can also assign them directly
+     Resque.after_fork = Resque::Metrics.after_fork
+
+ Once enabled this will add `*_fork_*` methods like `avg_fork_time`, etc.
+ The latest Resque is required for fork recording to work.
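+
+ For example (the values here are hypothetical), fork metrics read back the same way as the job metrics:
+
+     Resque::Metrics.total_fork_count               #=> 10000
+     Resque::Metrics.total_fork_time                #=> 10000
+     Resque::Metrics.avg_fork_time                  #=> 1000
+     Resque::Metrics.avg_fork_time_by_queue(:jobs)  #=> 1000
+     Resque::Metrics.avg_fork_time_by_job(SomeJob)  #=> 1000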
+
+ === Callbacks/Hooks
+
+ Resque::Metrics also has a simple callback/hook system so you can send data to your favorite agent. Completion and fork hooks are passed the job class,
+ the queue, and the time of the metric; the enqueue hook is passed only the job class and the queue.
+
+     # Also `on_job_fork` and `on_job_enqueue`
+     Resque::Metrics.on_job_complete do |job_class, queue, time|
+       # send to your metrics agent
+       Statsd.timing "resque.#{job_class}.complete_time", time
+       Statsd.increment "resque.#{job_class}.complete"
+       # etc
+     end
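+
+ A small sketch of an enqueue hook, again assuming a Statsd-style client:
+
+     Resque::Metrics.on_job_enqueue do |job_class, queue|
+       Statsd.increment "resque.#{job_class}.enqueue"
+     end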
+
+ == Contributing to resque-metrics
+
+ * Check out the latest master to make sure the feature hasn't been implemented or the bug hasn't been fixed yet
+ * Check out the issue tracker to make sure someone hasn't already requested and/or contributed it
+ * Fork the project
+ * Start a feature/bugfix branch
+ * Commit and push until you are happy with your contribution
+ * Make sure to add tests for it. This is important so I don't break it in a future version unintentionally.
+ * Please try not to mess with the Rakefile, version, or history. If you want to have your own version, or it is otherwise necessary, that is fine, but please isolate the change to its own commit so I can cherry-pick around it.
+
+ == Copyright
+
+ Copyright (c) 2011 Aaron Quint. See LICENSE.txt for
+ further details.
+
data/Rakefile ADDED
@@ -0,0 +1,49 @@
+ # encoding: utf-8
+
+ require 'rubygems'
+ require 'bundler'
+ begin
+   Bundler.setup(:default, :development)
+ rescue Bundler::BundlerError => e
+   $stderr.puts e.message
+   $stderr.puts "Run `bundle install` to install missing gems"
+   exit e.status_code
+ end
+ require 'rake'
+
+ require 'jeweler'
+ Jeweler::Tasks.new do |gem|
+   # gem is a Gem::Specification... see http://docs.rubygems.org/read/chapter/20 for more options
+   gem.name = "resque-metrics"
+   gem.homepage = "http://github.com/quirkey/resque-metrics"
+   gem.license = "MIT"
+   gem.summary = %Q{A Resque plugin for recording simple metrics for your jobs}
+   gem.description = <<-desc
+ A simple Resque plugin that times and saves some simple metrics for Resque jobs back into redis. Based on this system
+ you could build some simple auto-scaling mechanism based on the speed and ETA of queues. Also includes a hook/callback
+ mechanism for recording/sending the metrics to your favorite tool (AKA statsd/graphite).
+   desc
+   gem.email = "aaron@quirkey.com"
+   gem.authors = ["Aaron Quint"]
+   # dependencies defined in Gemfile
+ end
+ Jeweler::RubygemsDotOrgTasks.new
+
+ require 'rake/testtask'
+ Rake::TestTask.new(:test) do |test|
+   test.libs << 'lib' << 'test'
+   test.pattern = 'test/**/test_*.rb'
+   test.verbose = true
+ end
+
+ task :default => :test
+
+ require 'rake/rdoctask'
+ Rake::RDocTask.new do |rdoc|
+   version = File.exist?('VERSION') ? File.read('VERSION') : ""
+
+   rdoc.rdoc_dir = 'rdoc'
+   rdoc.title = "resque-metrics #{version}"
+   rdoc.rdoc_files.include('README*')
+   rdoc.rdoc_files.include('lib/**/*.rb')
+ end
data/VERSION ADDED
@@ -0,0 +1 @@
+ 0.0.1
data/lib/resque/metrics.rb ADDED
@@ -0,0 +1,222 @@
+ require 'resque'
+
+ module Resque
+   module Metrics
+
+     VERSION = '0.0.1'
+
+     def self.extended(klass)
+       klass.extend(Hooks)
+     end
+
+     def self.redis
+       ::Resque.redis
+     end
+
+     def self.watch_fork
+       ::Resque.before_fork = before_fork
+       ::Resque.after_fork = after_fork
+     end
+
+     def self.on_job_fork(&block)
+       set_callback(:on_job_fork, &block)
+     end
+
+     def self.on_job_complete(&block)
+       set_callback(:on_job_complete, &block)
+     end
+
+     def self.on_job_enqueue(&block)
+       set_callback(:on_job_enqueue, &block)
+     end
+
+     def self.set_callback(callback_name, &block)
+       @callbacks ||= {}
+       @callbacks[callback_name] ||= []
+       @callbacks[callback_name] << block
+     end
+
+     def self.run_callback(callback_name, *args)
+       if @callbacks && @callbacks[callback_name]
+         @callbacks[callback_name].each {|callback| callback.call(*args) }
+       end
+     end
+
+     def self.before_fork
+       lambda do |job|
+         start = Time.now.to_f * 1000
+         key = "_metrics_:fork_start:#{job.worker.to_s}"
+         ::Resque.redis.set key, start
+         ::Resque.redis.expire key, 60 * 60 * 60
+         true
+       end
+     end
+
+     def self.after_fork
+       lambda do |job|
+         end_time = Time.now.to_f * 1000
+         key = "_metrics_:fork_start:#{job.worker.to_s}"
+         start_time = ::Resque.redis.get key
+         if start_time
+           total = (end_time - start_time.to_f).to_i
+           ::Resque::Metrics.record_job_fork(job, total)
+         end
+         true
+       end
+     end
+
+     def self.record_job_fork(job, time)
+       job_class = job.payload_class
+       queue = job.queue
+       redis.multi do
+         increment_metric "fork_time", time
+         increment_metric "fork_time:queue:#{queue}", time
+         increment_metric "fork_time:job:#{job_class}", time
+         increment_metric "fork_count"
+         increment_metric "fork_count:queue:#{queue}"
+         increment_metric "fork_count:job:#{job_class}"
+       end
+       set_metric "avg_fork_time", total_fork_time / total_fork_count
+       set_metric "avg_fork_time:queue:#{queue}", total_fork_time_by_queue(queue) / total_fork_count_by_queue(queue)
+       set_metric "avg_fork_time:job:#{job_class}", total_fork_time_by_job(job_class) / total_fork_count_by_job(job_class)
+       run_callback(:on_job_fork, job_class, queue, time)
+     end
+
+     def self.record_job_enqueue(job_class)
+       queue = Resque.queue_from_class(job_class)
+       increment_metric "enqueue_count"
+       increment_metric "enqueue_count:job:#{job_class}"
+       increment_metric "enqueue_count:queue:#{queue}"
+       run_callback(:on_job_enqueue, job_class, queue)
+       true
+     end
+
+     def self.record_job_completion(job_class, time)
+       queue = Resque.queue_from_class(job_class)
+       redis.multi do
+         increment_metric "job_time", time
+         increment_metric "job_time:queue:#{queue}", time
+         increment_metric "job_time:job:#{job_class}", time
+         increment_metric "job_count"
+         increment_metric "job_count:queue:#{queue}"
+         increment_metric "job_count:job:#{job_class}"
+       end
+       set_metric "avg_job_time", total_job_time / total_job_count
+       set_metric "avg_job_time:queue:#{queue}", total_job_time_by_queue(queue) / total_job_count_by_queue(queue)
+       set_metric "avg_job_time:job:#{job_class}", total_job_time_by_job(job_class) / total_job_count_by_job(job_class)
+       run_callback(:on_job_complete, job_class, queue, time)
+     end
+
+     def self.increment_metric(metric, by = 1)
+       redis.incrby("_metrics_:#{metric}", by)
+     end
+
+     def self.set_metric(metric, val)
+       # store the value outright; incrby here would make the recomputed averages accumulate
+       redis.set("_metrics_:#{metric}", val)
+     end
+
+     def self.get_metric(metric)
+       redis.get("_metrics_:#{metric}").to_i
+     end
+
+     def self.total_enqueue_count
+       get_metric "enqueue_count"
+     end
+
+     def self.total_enqueue_count_by_job(job)
+       get_metric "enqueue_count:job:#{job}"
+     end
+
+     def self.total_enqueue_count_by_queue(queue)
+       get_metric "enqueue_count:queue:#{queue}"
+     end
+
+     def self.avg_job_time
+       get_metric "avg_job_time"
+     end
+
+     def self.avg_job_time_by_queue(queue)
+       get_metric "avg_job_time:queue:#{queue}"
+     end
+
+     def self.avg_job_time_by_job(job)
+       get_metric "avg_job_time:job:#{job}"
+     end
+
+     def self.total_job_time
+       get_metric "job_time"
+     end
+
+     def self.total_job_time_by_queue(queue)
+       get_metric "job_time:queue:#{queue}"
+     end
+
+     def self.total_job_time_by_job(job)
+       get_metric "job_time:job:#{job}"
+     end
+
+     def self.total_job_count
+       get_metric "job_count"
+     end
+
+     def self.total_job_count_by_queue(queue)
+       get_metric "job_count:queue:#{queue}"
+     end
+
+     def self.total_job_count_by_job(job)
+       get_metric "job_count:job:#{job}"
+     end
+
+     def self.avg_fork_time
+       get_metric "avg_fork_time"
+     end
+
+     def self.avg_fork_time_by_queue(queue)
+       get_metric "avg_fork_time:queue:#{queue}"
+     end
+
+     def self.avg_fork_time_by_job(job)
+       get_metric "avg_fork_time:job:#{job}"
+     end
+
+     def self.total_fork_time
+       get_metric "fork_time"
+     end
+
+     def self.total_fork_time_by_queue(queue)
+       get_metric "fork_time:queue:#{queue}"
+     end
+
+     def self.total_fork_time_by_job(job)
+       get_metric "fork_time:job:#{job}"
+     end
+
+     def self.total_fork_count
+       get_metric "fork_count"
+     end
+
+     def self.total_fork_count_by_queue(queue)
+       get_metric "fork_count:queue:#{queue}"
+     end
+
+     def self.total_fork_count_by_job(job)
+       get_metric "fork_count:job:#{job}"
+     end
+
+     module Hooks
+
+       def after_enqueue_metrics(*args)
+         Resque::Metrics.record_job_enqueue(self)
+       end
+
+       def around_perform_metrics(*args)
+         start = Time.now
+         yield
+         finish = ((Time.now.to_f - start.to_f) * 1000).to_i
+         Resque::Metrics.record_job_completion(self, finish)
+       end
+
+     end
+
+   end
+ end
data/lib/resque-metrics.rb ADDED
@@ -0,0 +1 @@
+ require 'resque/metrics'
data/test/helper.rb ADDED
@@ -0,0 +1,63 @@
+ dir = File.dirname(File.expand_path(__FILE__))
+ require 'rubygems'
+ require 'bundler'
+ begin
+   Bundler.setup(:default, :development)
+ rescue Bundler::BundlerError => e
+   $stderr.puts e.message
+   $stderr.puts "Run `bundle install` to install missing gems"
+   exit e.status_code
+ end
+ require 'minitest/autorun'
+
+ $LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
+ $LOAD_PATH.unshift(File.dirname(__FILE__))
+ $TESTING = true
+
+ require 'resque/metrics'
+
+ #
+ # make sure we can run redis
+ #
+
+ if !system("which redis-server")
+   puts '', "** can't find `redis-server` in your path"
+   puts "** try running `sudo rake install`"
+   abort ''
+ end
+
+ #
+ # start our own redis when the tests start,
+ # kill it when they end
+ #
+
+ at_exit do
+   next if $!
+
+   if defined?(MiniTest)
+     exit_code = MiniTest::Unit.new.run(ARGV)
+   else
+     exit_code = Test::Unit::AutoRunner.run
+   end
+
+   pid = `ps -e -o pid,command | grep [r]edis-test`.split(" ")[0]
+   puts "Killing test redis server..."
+   `rm -f #{dir}/dump.rdb`
+   Process.kill("KILL", pid.to_i)
+   exit exit_code
+ end
+
+ puts "Starting redis for testing at localhost:9736..."
+ `redis-server #{dir}/redis-test.conf`
+ Resque.redis = 'localhost:9736:2'
+
+ class SomeJob
+   extend Resque::Metrics
+
+   @queue = :jobs
+
+   def self.perform(x, y)
+     sleep rand * 0.01
+   end
+
+ end
data/test/redis-test.conf ADDED
@@ -0,0 +1,130 @@
+ # Redis configuration file example
+
+ # By default Redis does not run as a daemon. Use 'yes' if you need it.
+ # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+ daemonize yes
+
+ # When run as a daemon, Redis writes a pid file in /var/run/redis.pid by default.
+ # You can specify a custom pid file location here.
+ pidfile ./test/redis-test.pid
+
+ # Accept connections on the specified port, default is 6379
+ port 9736
+
+ # If you want you can bind a single interface, if the bind option is not
+ # specified all the interfaces will listen for connections.
+ #
+ # bind 127.0.0.1
+
+ # Close the connection after a client is idle for N seconds (0 to disable)
+ timeout 300
+
+ # Save the DB on disk:
+ #
+ # save <seconds> <changes>
+ #
+ # Will save the DB if both the given number of seconds and the given
+ # number of write operations against the DB occurred.
+ #
+ # In the example below the behaviour will be to save:
+ # after 900 sec (15 min) if at least 1 key changed
+ # after 300 sec (5 min) if at least 10 keys changed
+ # after 60 sec if at least 10000 keys changed
+ save 900 1
+ save 300 10
+ save 60 10000
+
+ # The filename where to dump the DB
+ dbfilename dump.rdb
+
+ # For default save/load DB in/from the working directory
+ # Note that you must specify a directory not a file name.
+ dir ./test/
+
+ # Set server verbosity to 'debug'
+ # it can be one of:
+ # debug (a lot of information, useful for development/testing)
+ # notice (moderately verbose, what you want in production probably)
+ # warning (only very important / critical messages are logged)
+ loglevel debug
+
+ # Specify the log file name. Also 'stdout' can be used to force
+ # the daemon to log on the standard output. Note that if you use standard
+ # output for logging but daemonize, logs will be sent to /dev/null
+ logfile stdout
+
+ # Set the number of databases. The default database is DB 0, you can select
+ # a different one on a per-connection basis using SELECT <dbid> where
+ # dbid is a number between 0 and 'databases'-1
+ databases 16
+
+ ################################# REPLICATION #################################
+
+ # Master-Slave replication. Use slaveof to make a Redis instance a copy of
+ # another Redis server. Note that the configuration is local to the slave
+ # so for example it is possible to configure the slave to save the DB with a
+ # different interval, or to listen to another port, and so on.
+
+ # slaveof <masterip> <masterport>
+
+ ################################## SECURITY ###################################
+
+ # Require clients to issue AUTH <PASSWORD> before processing any other
+ # commands. This might be useful in environments in which you do not trust
+ # others with access to the host running redis-server.
+ #
+ # This should stay commented out for backward compatibility and because most
+ # people do not need auth (e.g. they run their own servers).
+
+ # requirepass foobared
+
+ ################################### LIMITS ####################################
+
+ # Set the max number of connected clients at the same time. By default there
+ # is no limit, and it's up to the number of file descriptors the Redis process
+ # is able to open. The special value '0' means no limits.
+ # Once the limit is reached Redis will close all the new connections sending
+ # an error 'max number of clients reached'.
+
+ # maxclients 128
+
+ # Don't use more memory than the specified amount of bytes.
+ # When the memory limit is reached Redis will try to remove keys with an
+ # EXPIRE set. It will try to start freeing keys that are going to expire
+ # in little time and preserve keys with a longer time to live.
+ # Redis will also try to remove objects from free lists if possible.
+ #
+ # If all this fails, Redis will start to reply with errors to commands
+ # that will use more memory, like SET, LPUSH, and so on, and will continue
+ # to reply to most read-only commands like GET.
+ #
+ # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
+ # 'state' server or cache, not as a real DB. When Redis is used as a real
+ # database the memory usage will grow over the weeks, it will be obvious if
+ # it is going to use too much memory in the long run, and you'll have the time
+ # to upgrade. With maxmemory after the limit is reached you'll start to get
+ # errors for write operations, and this may even lead to DB inconsistency.
+
+ # maxmemory <bytes>
+
+ ############################### ADVANCED CONFIG ###############################
+
+ # Glue small output buffers together in order to send small replies in a
+ # single TCP packet. Uses a bit more CPU but most of the times it is a win
+ # in terms of number of queries per second. Use 'yes' if unsure.
+ glueoutputbuf yes
+
+ # Use object sharing. Can save a lot of memory if you have many common
+ # strings in your dataset, but performs lookups against the shared objects
+ # pool so it uses more CPU and can be a bit slower. Usually it's a good
+ # idea.
+ #
+ # When object sharing is enabled (shareobjects yes) you can use
+ # shareobjectspoolsize to control the size of the pool used in order to try
+ # object sharing. A bigger pool size will lead to better sharing capabilities.
+ # In general you want this value to be at least the double of the number of
+ # very common strings you have in your dataset.
+ #
+ # WARNING: object sharing is experimental, don't enable this feature
+ # in production before of Redis 1.0-stable. Still please try this feature in
+ # your development environment so that we can test it better.
data/test/test_resque-metrics.rb ADDED
@@ -0,0 +1,88 @@
+ require 'helper'
+
+ class TestResqueMetrics < MiniTest::Unit::TestCase
+   def setup
+     Resque.redis.flushall
+
+     Resque.before_first_fork = nil
+     Resque.before_fork = nil
+     Resque.after_fork = nil
+
+     @num_jobs = 4
+     @worker = Resque::Worker.new(:jobs)
+     @num_jobs.times do
+       work_job
+     end
+   end
+
+   def test_should_pass_resque_plugin_lint
+     assert Resque::Plugin.lint(Resque::Metrics::Hooks)
+   end
+
+   def test_should_perform_job
+     assert Resque::Job.new(:jobs, 'class' => SomeJob, 'args' => [1,2]).perform
+   end
+
+   def test_should_record_total_job_time
+     assert Resque::Metrics.total_job_time > 0
+     assert Resque::Metrics.total_job_time_by_queue(:jobs) > 0
+     assert Resque::Metrics.total_job_time_by_job(SomeJob) > 0
+   end
+
+   def test_should_record_enqueue_count
+     assert_equal @num_jobs, Resque::Metrics.total_enqueue_count
+     assert_equal @num_jobs, Resque::Metrics.total_enqueue_count_by_queue(:jobs)
+     assert_equal @num_jobs, Resque::Metrics.total_enqueue_count_by_job(SomeJob)
+   end
+
+   def test_should_record_job_count
+     assert Resque::Metrics.total_job_count > 0
+     assert Resque::Metrics.total_job_count_by_queue(:jobs) > 0
+     assert Resque::Metrics.total_job_count_by_job(SomeJob) > 0
+   end
+
+   def test_should_record_avg_job_time
+     assert Resque::Metrics.avg_job_time > 0
+     assert Resque::Metrics.avg_job_time_by_queue(:jobs) > 0
+     assert Resque::Metrics.avg_job_time_by_job(SomeJob) > 0
+   end
+
+   def test_should_call_callbacks
+     recorded = []
+     recorded_count = 0
+     Resque::Metrics.on_job_complete do |klass, queue, time|
+       recorded << [klass, queue, time]
+     end
+     Resque::Metrics.on_job_complete do |klass, queue, time|
+       recorded_count += 1
+     end
+     work_job
+     work_job
+     assert_equal 2, recorded.length
+     assert_equal SomeJob, recorded[0][0]
+     assert_equal :jobs, recorded[0][1]
+     assert recorded[0][2] > 0
+     assert_equal 2, recorded_count
+   end
+
+   def test_should_record_fork_times
+     Resque::Metrics.watch_fork
+     Resque.after_fork do |job|
+       sleep 0.1
+       Resque::Metrics.after_fork.call(job)
+     end
+     work_job
+     work_job
+     assert_equal 2, Resque::Metrics.total_fork_count
+     assert Resque::Metrics.avg_fork_time > 0
+     assert Resque::Metrics.avg_fork_time_by_queue(:jobs) > 0
+     assert Resque::Metrics.avg_fork_time_by_job(SomeJob) > 0
+   end
+
+   private
+   def work_job
+     Resque.enqueue(SomeJob, 20, '/tmp')
+     @worker.work(0)
+   end
+
+ end
metadata ADDED
@@ -0,0 +1,125 @@
+ --- !ruby/object:Gem::Specification
+ name: resque-metrics
+ version: !ruby/object:Gem::Version
+   version: 0.0.1
+ prerelease:
+ platform: ruby
+ authors:
+ - Aaron Quint
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2011-10-05 00:00:00.000000000Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: resque
+   requirement: &70326698893780 !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: '1.19'
+   type: :runtime
+   prerelease: false
+   version_requirements: *70326698893780
+ - !ruby/object:Gem::Dependency
+   name: ruby-debug19
+   requirement: &70326698892420 !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :development
+   prerelease: false
+   version_requirements: *70326698892420
+ - !ruby/object:Gem::Dependency
+   name: minitest
+   requirement: &70326698883560 !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '2'
+   type: :development
+   prerelease: false
+   version_requirements: *70326698883560
+ - !ruby/object:Gem::Dependency
+   name: bundler
+   requirement: &70326698882180 !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: 1.0.0
+   type: :development
+   prerelease: false
+   version_requirements: *70326698882180
+ - !ruby/object:Gem::Dependency
+   name: jeweler
+   requirement: &70326698880860 !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: 1.6.2
+   type: :development
+   prerelease: false
+   version_requirements: *70326698880860
+ description: ! 'A simple Resque plugin that times and saves some simple metrics for
+   Resque jobs back into redis. Based on this system
+
+   you could build some simple auto-scaling mechanism based on the speed and ETA of
+   queues. Also includes a hook/callback
+
+   mechanism for recording/sending the metrics to your favorite tool (AKA statsd/graphite).
+
+   '
+ email: aaron@quirkey.com
+ executables: []
+ extensions: []
+ extra_rdoc_files:
+ - LICENSE.txt
+ - README.rdoc
+ files:
+ - .document
+ - Gemfile
+ - Gemfile.lock
+ - LICENSE.txt
+ - README.rdoc
+ - Rakefile
+ - VERSION
+ - lib/resque-metrics.rb
+ - lib/resque/metrics.rb
+ - test/helper.rb
+ - test/redis-test.conf
+ - test/test_resque-metrics.rb
+ homepage: http://github.com/quirkey/resque-metrics
+ licenses:
+ - MIT
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   none: false
+   requirements:
+   - - ! '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+       segments:
+       - 0
+       hash: -2068879796635049629
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   none: false
+   requirements:
+   - - ! '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubyforge_project:
+ rubygems_version: 1.8.10
+ signing_key:
+ specification_version: 3
+ summary: A Resque plugin for recording simple metrics for your jobs
+ test_files: []