samoli-hirefire 0.1.1
Sign up to get free protection for your applications and to get access to all the features.
- data/.document +3 -0
- data/.gitignore +4 -0
- data/.infinity_test +11 -0
- data/.rspec +3 -0
- data/Gemfile +15 -0
- data/Gemfile.lock +53 -0
- data/LICENSE.md +20 -0
- data/README.md +148 -0
- data/hirefire.gemspec +29 -0
- data/lib/hirefire.rb +111 -0
- data/lib/hirefire/backend.rb +37 -0
- data/lib/hirefire/backend/delayed_job/active_record.rb +31 -0
- data/lib/hirefire/backend/delayed_job/mongoid.rb +32 -0
- data/lib/hirefire/backend/resque/redis.rb +20 -0
- data/lib/hirefire/configuration.rb +53 -0
- data/lib/hirefire/environment.rb +103 -0
- data/lib/hirefire/environment/base.rb +213 -0
- data/lib/hirefire/environment/heroku.rb +46 -0
- data/lib/hirefire/environment/local.rb +84 -0
- data/lib/hirefire/environment/noop.rb +19 -0
- data/lib/hirefire/initializer.rb +73 -0
- data/lib/hirefire/logger.rb +98 -0
- data/lib/hirefire/railtie.rb +40 -0
- data/lib/hirefire/version.rb +13 -0
- data/lib/hirefire/workers/delayed_job.rb +5 -0
- data/lib/hirefire/workers/delayed_job/worker.rb +66 -0
- data/lib/hirefire/workers/resque.rb +31 -0
- data/lib/hirefire/workers/resque/job.rb +70 -0
- data/lib/hirefire/workers/resque/tasks.rb +21 -0
- data/lib/hirefire/workers/resque/worker.rb +57 -0
- data/spec/configuration_spec.rb +70 -0
- data/spec/environment_spec.rb +369 -0
- data/spec/logger_spec.rb +37 -0
- data/spec/spec_helper.rb +15 -0
- metadata +131 -0
data/.document
ADDED
data/.gitignore
ADDED
data/.infinity_test
ADDED
data/.rspec
ADDED
data/Gemfile
ADDED
data/Gemfile.lock
ADDED
@@ -0,0 +1,53 @@
|
|
1
|
+
GEM
|
2
|
+
remote: http://rubygems.org/
|
3
|
+
specs:
|
4
|
+
configuration (1.2.0)
|
5
|
+
diff-lcs (1.1.2)
|
6
|
+
fattr (2.2.0)
|
7
|
+
fuubar (0.0.3)
|
8
|
+
rspec (~> 2.0)
|
9
|
+
rspec-instafail (~> 0.1.4)
|
10
|
+
ruby-progressbar (~> 0.0.9)
|
11
|
+
heroku (1.20.1)
|
12
|
+
launchy (~> 0.3.2)
|
13
|
+
rest-client (< 1.7.0, >= 1.4.0)
|
14
|
+
infinity_test (1.0.2)
|
15
|
+
notifiers (>= 1.1.0)
|
16
|
+
watchr (>= 0.7)
|
17
|
+
launchy (0.3.7)
|
18
|
+
configuration (>= 0.0.5)
|
19
|
+
rake (>= 0.8.1)
|
20
|
+
mime-types (1.16)
|
21
|
+
mocha (0.9.12)
|
22
|
+
notifiers (1.1.0)
|
23
|
+
rake (0.8.7)
|
24
|
+
rest-client (1.6.1)
|
25
|
+
mime-types (>= 1.16)
|
26
|
+
rspec (2.5.0)
|
27
|
+
rspec-core (~> 2.5.0)
|
28
|
+
rspec-expectations (~> 2.5.0)
|
29
|
+
rspec-mocks (~> 2.5.0)
|
30
|
+
rspec-core (2.5.1)
|
31
|
+
rspec-expectations (2.5.0)
|
32
|
+
diff-lcs (~> 1.1.2)
|
33
|
+
rspec-instafail (0.1.6)
|
34
|
+
rspec-mocks (2.5.0)
|
35
|
+
ruby-progressbar (0.0.9)
|
36
|
+
rush (0.6.7)
|
37
|
+
session
|
38
|
+
session (3.1.0)
|
39
|
+
fattr
|
40
|
+
timecop (0.3.5)
|
41
|
+
watchr (0.7)
|
42
|
+
|
43
|
+
PLATFORMS
|
44
|
+
ruby
|
45
|
+
|
46
|
+
DEPENDENCIES
|
47
|
+
fuubar
|
48
|
+
heroku
|
49
|
+
infinity_test
|
50
|
+
mocha
|
51
|
+
rspec
|
52
|
+
rush
|
53
|
+
timecop
|
data/LICENSE.md
ADDED
@@ -0,0 +1,20 @@
|
|
1
|
+
Copyright (c) 2011 Michael van Rooijen ( [@meskyanichi](http://twitter.com/#!/meskyanichi) )
|
2
|
+
|
3
|
+
Permission is hereby granted, free of charge, to any person obtaining
|
4
|
+
a copy of this software and associated documentation files (the
|
5
|
+
"Software"), to deal in the Software without restriction, including
|
6
|
+
without limitation the rights to use, copy, modify, merge, publish,
|
7
|
+
distribute, sublicense, and/or sell copies of the Software, and to
|
8
|
+
permit persons to whom the Software is furnished to do so, subject to
|
9
|
+
the following conditions:
|
10
|
+
|
11
|
+
The above copyright notice and this permission notice shall be
|
12
|
+
included in all copies or substantial portions of the Software.
|
13
|
+
|
14
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
15
|
+
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
16
|
+
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
17
|
+
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
18
|
+
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
19
|
+
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
20
|
+
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
data/README.md
ADDED
@@ -0,0 +1,148 @@
|
|
1
|
+
HireFire - The Heroku Worker Manager
|
2
|
+
====================================
|
3
|
+
|
4
|
+
**HireFire automatically "hires" and "fires" (aka "scales") [Delayed Job](https://github.com/collectiveidea/delayed_job) and [Resque](https://github.com/defunkt/resque) workers on Heroku**. When there are no queue jobs, HireFire will fire (shut down) all workers. If there are queued jobs, then it'll hire (spin up) workers. The amount of workers that get hired depends on the amount of queued jobs (the ratio can be configured by you). HireFire is great for both high, mid and low traffic applications. It can save you a lot of money by only hiring workers when there are pending jobs, and then firing them again once all the jobs have been processed. It's also capable of dramatically reducing processing time by automatically hiring more workers when the queue size increases.
|
5
|
+
|
6
|
+
**Low traffic example:** say we have a small application that doesn't process for more than 2 hours in the background a month. Meanwhile, your worker is basically just idle the rest of the 718 hours in that month. Keeping that idle worker running costs $36/month ($0.05/hour). But, for the resources you're actually **making use of** (2 hours a month), you should be paying $0.10/month, not $36/month. This is what HireFire is for.
|
7
|
+
|
8
|
+
**High traffic example:** say we have a high traffic application that needs to process a lot of jobs. There may be "traffic spikes" from time to time. In this case you can take advantage of the **job\_worker\_ratio**. Since this is application-specific, HireFire allows you to define how many workers there should be running, depending on the amount of queued jobs there are (see example configuration below). HireFire will then spin up more workers as traffic increases so it can work through the queue faster, then when the jobs are all finished, it'll shut down all the workers again until the next job gets queued (in which case it'll start with only a single worker again).
|
9
|
+
|
10
|
+
**Enough with the examples!** Read on to see how to set it, and configure it to your scaling and money saving needs.
|
11
|
+
|
12
|
+
Author
|
13
|
+
------
|
14
|
+
|
15
|
+
**Michael van Rooijen ( [@meskyanichi](http://twitter.com/#!/meskyanichi) )**
|
16
|
+
|
17
|
+
Drop me a message for any questions, suggestions, requests, bugs or submit them to the [issue log](https://github.com/meskyanichi/hirefire/issues).
|
18
|
+
|
19
|
+
|
20
|
+
Setting it up
|
21
|
+
-------------
|
22
|
+
|
23
|
+
A painless process. In a Ruby on Rails environment you would do something like this.
|
24
|
+
|
25
|
+
**Rails.root/Gemfile**
|
26
|
+
|
27
|
+
gem 'rails'
|
28
|
+
# gem 'delayed_job' # uncomment this line if you use Delayed Job
|
29
|
+
# gem 'resque' # uncomment this line if you use Resque
|
30
|
+
gem 'hirefire'
|
31
|
+
|
32
|
+
**(The order is important: "Delayed Job" / "Resque" > HireFire)**
|
33
|
+
|
34
|
+
Be sure to add the following Heroku environment variables so HireFire can manage your workers.
|
35
|
+
|
36
|
+
heroku config:add HIREFIRE_EMAIL=<your_email> HIREFIRE_PASSWORD=<your_password>
|
37
|
+
|
38
|
+
These are the same email and password credentials you use to log in to the Heroku web interface to manage your workers.
|
39
|
+
|
40
|
+
And that's it. Next time you deploy to [Heroku](http://heroku.com/) it'll automatically hire and fire your workers. Now, there are defaults, but I highly recommend you configure it since it only takes a few seconds. Create an initializer file:
|
41
|
+
|
42
|
+
**Rails.root/config/initializers/hirefire.rb**
|
43
|
+
|
44
|
+
HireFire.configure do |config|
|
45
|
+
config.environment = nil # default in production is :heroku. default in development is :noop
|
46
|
+
config.max_workers = 5 # default is 1
|
47
|
+
config.min_workers = 0 # default is 0
|
48
|
+
config.job_worker_ratio = [
|
49
|
+
{ :jobs => 1, :workers => 1 },
|
50
|
+
{ :jobs => 15, :workers => 2 },
|
51
|
+
{ :jobs => 35, :workers => 3 },
|
52
|
+
{ :jobs => 60, :workers => 4 },
|
53
|
+
{ :jobs => 80, :workers => 5 }
|
54
|
+
]
|
55
|
+
end
|
56
|
+
|
57
|
+
Basically what it comes down to is that we say **NEVER** to hire more than 5 workers at a time (`config.max_workers = 5`). And then we define an array of hashes that represents our **job\_worker\_ratio**. In the above example we are basically saying:
|
58
|
+
|
59
|
+
* Hire 1 worker if there are 1-14 queued jobs
|
60
|
+
* Hire 2 workers if there are 15-34 queued jobs
|
61
|
+
* Hire 3 workers if there are 35-59 queued jobs
|
62
|
+
* Hire 4 workers if there are 60-79 queued jobs
|
63
|
+
* Hire 5 workers if there are more than 80 queued jobs
|
64
|
+
|
65
|
+
Once all the jobs in the queue have been processed, it'll fire (shut down) all the workers and start with a single worker the next time a new job gets queued. And then the next time the queue hits the 15 jobs mark, in which case the single worker isn't fast enough on its own, it'll spin up the 2nd worker again.
|
66
|
+
|
67
|
+
*If you prefer a more functional way of defining your job/worker ratio, you could use the following notation style:*
|
68
|
+
|
69
|
+
HireFire.configure do |config|
|
70
|
+
config.max_workers = 5
|
71
|
+
config.job_worker_ratio = [
|
72
|
+
{ :when => lambda {|jobs| jobs < 15 }, :workers => 1 },
|
73
|
+
{ :when => lambda {|jobs| jobs < 35 }, :workers => 2 },
|
74
|
+
{ :when => lambda {|jobs| jobs < 60 }, :workers => 3 },
|
75
|
+
{ :when => lambda {|jobs| jobs < 80 }, :workers => 4 }
|
76
|
+
]
|
77
|
+
end
|
78
|
+
|
79
|
+
The above notation is slightly different, since now you basically define how many workers to hire when `jobs < n`. So for example if there are 80 or more jobs, it'll hire the `max_workers` amount, which is `5` in the above example. If you change the `max_workers = 5` to `max_workers = 10`, then if there are 80 or more jobs queued, it'll go from 4 to 10 workers.
|
80
|
+
|
81
|
+
|
82
|
+
In a non-Ruby on Rails environment
|
83
|
+
----------------------------------
|
84
|
+
|
85
|
+
Almost the same setup, except that you have to initialize HireFire yourself after Delayed Job or Resque is done loading.
|
86
|
+
|
87
|
+
require 'hirefire'
|
88
|
+
# require 'delayed_job' # uncomment this line if you use Delayed Job
|
89
|
+
# require 'resque' # uncomment this line if you use Resque
|
90
|
+
HireFire::Initializer.initialize!
|
91
|
+
|
92
|
+
**(Again, the order is important: "Delayed Job" / "Resque" > HireFire)**
|
93
|
+
|
94
|
+
If all goes well you should see a message similar to this when you boot your application:
|
95
|
+
|
96
|
+
[HireFire] Delayed::Backend::ActiveRecord::Job detected!
|
97
|
+
|
98
|
+
|
99
|
+
Worker / Mapper Support
|
100
|
+
--------------
|
101
|
+
|
102
|
+
HireFire currently works with the following worker and mapper libraries:
|
103
|
+
|
104
|
+
- [Delayed Job](https://github.com/collectiveidea/delayed_job)
|
105
|
+
- [ActiveRecord ORM](https://github.com/rails/rails/tree/master/activerecord)
|
106
|
+
- [Mongoid ODM](https://github.com/mongoid/mongoid) (using [delayed_job_mongoid](https://github.com/collectiveidea/delayed_job_mongoid))
|
107
|
+
|
108
|
+
- [Resque](https://github.com/defunkt/resque)
|
109
|
+
- [Redis](https://github.com/ezmobius/redis-rb)
|
110
|
+
|
111
|
+
|
112
|
+
Frequently Asked Questions
|
113
|
+
--------------------------
|
114
|
+
|
115
|
+
- **Question:** *Does it start workers immediately after a job gets queued?*
|
116
|
+
- **Answer:** Yes, once a new job gets queued it'll immediately calculate the amount of workers that are required and hire them accordingly.
|
117
|
+
|
118
|
+
- **Question:** *Does it stop workers immediately when there are no jobs to be processed?*
|
119
|
+
- **Answer:** Yes, every worker has been made self-aware to see this. Once there are no jobs to be processed, all workers will immediately be fired (shut down). *For example, if you have no jobs in the queue, and you start cranking up your Workers via Heroku's web ui, once the worker spawns and sees it has nothing to do, it'll immediately shut itself down.*
|
120
|
+
|
121
|
+
- **Question:** *How does this save me money?*
|
122
|
+
- **Answer:** According to Heroku's documentation, Workers (same as Dynos), are prorated to the second. *For example, say that 10 jobs get queued and a worker is spawned to process them and takes about 1 minute to do so and then shuts itself down, theoretically you only pay $0.0008.*
|
123
|
+
|
124
|
+
- **Question:** *With Delayed Job you can set the :run_at to a time in the future.*
|
125
|
+
- **Answer:** Unfortunately since we cannot spawn a monitoring process on the Heroku platform, HireFire will not hire workers until a job gets queued. This means that if you set the :run_at time a few minutes in the future, and these few minutes pass, the job will not be processed until a new job gets queued which triggers the chain of events. (Best to avoid using `run_at` with Delayed Job when using HireFire unless you have a mid-high traffic web application in which case HireFire gets triggered enough times)
|
126
|
+
|
127
|
+
- **Question:** *If a job is set to run at a time in the future, will workers remain hired to wait for this job to be "processable"?*
|
128
|
+
- **Answer:** No, because if you enqueue a job to run 3 hours from the time it was enqueued, you might have workers doing nothing the coming 3 hours. Best to avoid scheduling jobs to be processed in the future.
|
129
|
+
|
130
|
+
- **Question:** *Will it scale down workers from, for example, 5 to 4?*
|
131
|
+
- **Answer:** No, I have consciously chosen not to do that for 2 reasons:
|
132
|
+
1. There is no way to tell which worker is currently processing a job, so it might fire a worker that was busy, causing the job to exit during the process.
|
133
|
+
2. Does it really matter? Your jobs will be processed faster, and once the queue is completely empty, all workers will be fired anyway. (You could call this a feature! Since 5 jobs process faster than 4, but the cost remains the same because it's all pro-rated to the second)
|
134
|
+
|
135
|
+
- **Question:** *Will running jobs concurrently (with multiple workers) cost more?*
|
136
|
+
- **Answer:** Actually, no. Since workers are pro-rated to the second, the moment you hire 3 workers, it costs 3 times more, but it also processes 3 times faster. You could also let 1 worker process all the jobs rather than 3, but that means it'll still cost the same amount as when you hire 3 workers, since it takes 3 times longer to process.
|
137
|
+
|
138
|
+
- **Question:** *Can I process jobs faster with HireFire?*
|
139
|
+
- **Answer:** When you run multiple jobs concurrently, you can speed up your processing dramatically. *Normally you wouldn't set the workers to 10 for example, but with HireFire you can tell it to Hire 10 workers when there are 50 jobs (would normally be overkill and cost you A LOT of money) but since (see Q/A above) Workers are pro-rated to the second, and HireFire immediately fires all workers once all the jobs in the queue have been processed, it makes no difference whether you have a single worker processing 50 jobs, or 5 workers, or even 10 workers. It processes 10 times faster, but costs the same.*
|
140
|
+
|
141
|
+
|
142
|
+
|
143
|
+
Other potentially interesting gems
|
144
|
+
----------------------------------
|
145
|
+
|
146
|
+
* [Backup](https://github.com/meskyanichi/backup)
|
147
|
+
* [GitPusshuTen](https://github.com/meskyanichi/gitpusshuten)
|
148
|
+
* [Mongoid::Paperclip](https://github.com/meskyanichi/mongoid-paperclip)
|
data/hirefire.gemspec
ADDED
@@ -0,0 +1,29 @@
|
|
1
|
+
# encoding: utf-8
|
2
|
+
|
3
|
+
require File.expand_path(File.dirname(__FILE__) + '/lib/hirefire')
|
4
|
+
|
5
|
+
Gem::Specification.new do |gem|
|
6
|
+
|
7
|
+
##
|
8
|
+
# General configuration / information
|
9
|
+
gem.name = 'samoli-hirefire'
|
10
|
+
gem.version = HireFire::Version.current
|
11
|
+
gem.platform = Gem::Platform::RUBY
|
12
|
+
gem.authors = 'Michael van Rooijen'
|
13
|
+
gem.email = 'meskyanichi@gmail.com'
|
14
|
+
gem.homepage = 'http://rubygems.org/gems/hirefire'
|
15
|
+
gem.summary = %|HireFire automatically "hires" and "fires" (aka "scales") Delayed Job and Resque workers on Heroku.|
|
16
|
+
gem.description = %|HireFire automatically "hires" and "fires" (aka "scales") Delayed Job and Resque workers on Heroku. When there are no queue jobs, HireFire will fire (shut down) all workers. If there are queued jobs, then it'll hire (spin up) workers. The amount of workers that get hired depends on the amount of queued jobs (the ratio can be configured by you). HireFire is great for both high, mid and low traffic applications. It can save you a lot of money by only hiring workers when there are pending jobs, and then firing them again once all the jobs have been processed. It's also capable to dramatically reducing processing time by automatically hiring more workers when the queue size increases.|
|
17
|
+
|
18
|
+
##
|
19
|
+
# Files and folder that need to be compiled in to the Ruby Gem
|
20
|
+
gem.files = %x[git ls-files].split("\n")
|
21
|
+
gem.test_files = %x[git ls-files -- {spec}/*].split("\n")
|
22
|
+
gem.require_path = 'lib'
|
23
|
+
|
24
|
+
##
|
25
|
+
# Production gem dependencies
|
26
|
+
gem.add_dependency 'heroku', ['~> 1.20.1']
|
27
|
+
gem.add_dependency 'rush', ['~> 0.6.7']
|
28
|
+
|
29
|
+
end
|
data/lib/hirefire.rb
ADDED
@@ -0,0 +1,111 @@
|
|
1
|
+
# encoding: utf-8
|
2
|
+
|
3
|
+
module HireFire
|
4
|
+
|
5
|
+
##
|
6
|
+
# HireFire constants
|
7
|
+
LIB_PATH = File.dirname(__FILE__)
|
8
|
+
HIREFIRE_PATH = File.join(LIB_PATH, 'hirefire')
|
9
|
+
ENVIRONMENT_PATH = File.join(HIREFIRE_PATH, 'environment')
|
10
|
+
BACKEND_PATH = File.join(HIREFIRE_PATH, 'backend')
|
11
|
+
WORKERS_PATH = File.join(HIREFIRE_PATH, 'workers')
|
12
|
+
|
13
|
+
##
|
14
|
+
# HireFire namespace
|
15
|
+
autoload :Configuration, File.join(HIREFIRE_PATH, 'configuration')
|
16
|
+
autoload :Environment, File.join(HIREFIRE_PATH, 'environment')
|
17
|
+
autoload :Initializer, File.join(HIREFIRE_PATH, 'initializer')
|
18
|
+
autoload :Backend, File.join(HIREFIRE_PATH, 'backend')
|
19
|
+
autoload :Logger, File.join(HIREFIRE_PATH, 'logger')
|
20
|
+
autoload :Version, File.join(HIREFIRE_PATH, 'version')
|
21
|
+
|
22
|
+
##
|
23
|
+
# HireFire::Environment namespace
|
24
|
+
module Environment
|
25
|
+
autoload :Base, File.join(ENVIRONMENT_PATH, 'base')
|
26
|
+
autoload :Heroku, File.join(ENVIRONMENT_PATH, 'heroku')
|
27
|
+
autoload :Local, File.join(ENVIRONMENT_PATH, 'local')
|
28
|
+
autoload :Noop, File.join(ENVIRONMENT_PATH, 'noop')
|
29
|
+
end
|
30
|
+
|
31
|
+
##
|
32
|
+
# HireFire::Workers namespace
|
33
|
+
module Workers
|
34
|
+
autoload :DelayedJob, File.join(WORKERS_PATH, 'delayed_job')
|
35
|
+
autoload :Resque, File.join(WORKERS_PATH, 'resque')
|
36
|
+
end
|
37
|
+
|
38
|
+
##
|
39
|
+
# HireFire::Backend namespace
|
40
|
+
module Backend
|
41
|
+
DELAYED_JOB_PATH = File.join(BACKEND_PATH, 'delayed_job')
|
42
|
+
RESQUE_PATH = File.join(BACKEND_PATH, 'resque')
|
43
|
+
|
44
|
+
##
|
45
|
+
# HireFire::Backend::DelayedJob namespace
|
46
|
+
module DelayedJob
|
47
|
+
autoload :ActiveRecord, File.join(DELAYED_JOB_PATH, 'active_record')
|
48
|
+
autoload :Mongoid, File.join(DELAYED_JOB_PATH, 'mongoid')
|
49
|
+
end
|
50
|
+
|
51
|
+
##
|
52
|
+
# HireFire::Backend::Resque namespace
|
53
|
+
module Resque
|
54
|
+
autoload :Redis, File.join(RESQUE_PATH, 'redis')
|
55
|
+
end
|
56
|
+
end
|
57
|
+
|
58
|
+
##
|
59
|
+
# This method is used to configure HireFire
|
60
|
+
#
|
61
|
+
# @yield [config] the instance of HireFire::Configuration class
|
62
|
+
# @yieldparam [Fixnum] max_workers default: 1 (set at least 1)
|
63
|
+
# @yieldparam [Array] job_worker_ratio default: see example
|
64
|
+
# @yieldparam [Symbol, nil] environment (:heroku, :local, :noop or nil) - default: nil
|
65
|
+
#
|
66
|
+
# @note Every param has it's own defaults. It's best to leave the environment param at "nil".
|
67
|
+
# When environment is set to "nil", it'll default to the :noop environment. This basically means
|
68
|
+
# that you have to run "rake jobs:work" yourself from the console to get the jobs running in development mode.
|
69
|
+
# In production, it'll automatically use :heroku if deployed to the Heroku platform.
|
70
|
+
#
|
71
|
+
# @example
|
72
|
+
# HireFire.configure do |config|
|
73
|
+
# config.environment = nil
|
74
|
+
# config.max_workers = 5
|
75
|
+
# config.job_worker_ratio = [
|
76
|
+
# { :jobs => 1, :workers => 1 },
|
77
|
+
# { :jobs => 15, :workers => 2 },
|
78
|
+
# { :jobs => 35, :workers => 3 },
|
79
|
+
# { :jobs => 60, :workers => 4 },
|
80
|
+
# { :jobs => 80, :workers => 5 }
|
81
|
+
# ]
|
82
|
+
# end
|
83
|
+
#
|
84
|
+
# @return [nil]
|
85
|
+
def self.configure
|
86
|
+
yield(configuration); nil
|
87
|
+
end
|
88
|
+
|
89
|
+
##
|
90
|
+
# Instantiates a new HireFire::Configuration
|
91
|
+
# instance and instance variable caches it
|
92
|
+
def self.configuration
|
93
|
+
@configuration ||= HireFire::Configuration.new
|
94
|
+
end
|
95
|
+
|
96
|
+
end
|
97
|
+
|
98
|
+
##
|
99
|
+
# If Ruby on Rails is detected, it'll automatically initialize HireFire
|
100
|
+
# so that the developer doesn't have to manually invoke it from an initializer file
|
101
|
+
#
|
102
|
+
# Users not using Ruby on Rails will have to run "HireFire::Initializer.initialize!"
|
103
|
+
# in their application manually, after loading the worker library (either "Delayed Job" or "Resque")
|
104
|
+
# and the desired mapper (ActiveRecord, Mongoid or Redis)
|
105
|
+
if defined?(Rails)
|
106
|
+
if Rails.version >= '3.0.0'
|
107
|
+
require File.join(HireFire::HIREFIRE_PATH, 'railtie')
|
108
|
+
else
|
109
|
+
HireFire::Initializer.initialize!
|
110
|
+
end
|
111
|
+
end
|
@@ -0,0 +1,37 @@
|
|
1
|
+
# encoding: utf-8
|
2
|
+
|
3
|
+
module HireFire
|
4
|
+
module Backend
|
5
|
+
|
6
|
+
##
|
7
|
+
# Load the correct module (ActiveRecord, Mongoid or Redis)
|
8
|
+
# based on which worker and backends are loaded
|
9
|
+
#
|
10
|
+
# Currently supports:
|
11
|
+
# - Delayed Job with ActiveRecord and Mongoid
|
12
|
+
# - Resque with Redis
|
13
|
+
#
|
14
|
+
# @return [nil]
|
15
|
+
def self.included(base)
|
16
|
+
|
17
|
+
##
|
18
|
+
# Delayed Job specific backends
|
19
|
+
if defined?(::Delayed::Job)
|
20
|
+
if defined?(::Delayed::Backend::ActiveRecord::Job)
|
21
|
+
base.send(:include, HireFire::Backend::DelayedJob::ActiveRecord)
|
22
|
+
end
|
23
|
+
|
24
|
+
if defined?(::Delayed::Backend::Mongoid::Job)
|
25
|
+
base.send(:include, HireFire::Backend::DelayedJob::Mongoid)
|
26
|
+
end
|
27
|
+
end
|
28
|
+
|
29
|
+
##
|
30
|
+
# Resque specific backends
|
31
|
+
if defined?(::Resque)
|
32
|
+
base.send(:include, HireFire::Backend::Resque::Redis)
|
33
|
+
end
|
34
|
+
end
|
35
|
+
|
36
|
+
end
|
37
|
+
end
|