bluth 0.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/CHANGES.txt +6 -0
- data/LICENSE.txt +19 -0
- data/README.rdoc +14 -0
- data/Rakefile +68 -0
- data/VERSION.yml +4 -0
- data/bluth.gemspec +54 -0
- data/lib/bluth.rb +211 -0
- data/lib/bluth/gob.rb +180 -0
- data/lib/bluth/worker.rb +356 -0
- data/lib/daemonizing.rb +221 -0
- metadata +108 -0
data/CHANGES.txt
ADDED
data/LICENSE.txt
ADDED
@@ -0,0 +1,19 @@
Copyright (c) 2010-2011 Solutious Inc, Delano Mandelbaum

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

data/README.rdoc
ADDED
@@ -0,0 +1,14 @@
# Bluth - 0.8 BETA

**A Redis queuing system built on top of Familia**


## More Information

* [Codes](http://github.com/delano/bluth)
* [RDocs](http://delano.github.com/bluth)

## Credits

* [Delano Mandelbaum](http://solutious.com)
* Bluth lego by [Baby Elephant](http://www.flickr.com/photos/baby_elephant/454780652/)

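The README stops short of a usage example. The following is a minimal sketch inferred from the handler API in lib/bluth/gob.rb and lib/bluth.rb further down in this diff (subclassing Bluth::Gob registers a handler, enqueue pushes a job, and the worker later calls the handler's class-level perform). The EmailGob class, its data hash, and the connection options are assumptions for illustration, not part of the gem.

require 'bluth'

# Assumed connection options; Bluth.conf= clones the hash, forces the db,
# and connects through Familia.
Bluth.conf = { :host => 'localhost', :port => 6379 }

class EmailGob < Bluth::Gob
  queue Bluth::High                    # default queue for this handler
  def self.perform(data)
    # the worker dispatches here with the job's data hash
    puts "Sending email to #{data['address']}"
  end
end

EmailGob.enqueue 'address' => 'michael@example.com'  # pushes the job ID onto the High queue list
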
data/Rakefile
ADDED
@@ -0,0 +1,68 @@
require "rubygems"
require "rake"
require "rake/clean"
require 'yaml'

begin
  require 'hanna/rdoctask'
rescue LoadError
  require 'rake/rdoctask'
end

config = YAML.load_file("VERSION.yml")
task :default => ["build"]
CLEAN.include [ 'pkg', 'doc' ]
name = "bluth"

begin
  require "jeweler"
  Jeweler::Tasks.new do |gem|
    gem.version = "#{config[:MAJOR]}.#{config[:MINOR]}.#{config[:PATCH]}"
    gem.name = name
    gem.rubyforge_project = gem.name
    gem.summary = "A Redis queuing system built on top of Familia"
    gem.description = "A Redis queuing system built on top of Familia"
    gem.email = "delano@solutious.com"
    gem.homepage = "http://github.com/delano/bluth"
    gem.authors = ["Delano Mandelbaum"]
    gem.add_dependency("familia", ">= 0.5.3")
    gem.add_dependency('sysinfo', '>= 0.7.3')

    #gem.add_development_dependency("rspec", ">= 1.2.9")
    #gem.add_development_dependency("mocha", ">= 0.9.8")
  end
  Jeweler::GemcutterTasks.new
rescue LoadError
  puts "Jeweler (or a dependency) not available. Install it with: sudo gem install jeweler"
end


Rake::RDocTask.new do |rdoc|
  version = "#{config[:MAJOR]}.#{config[:MINOR]}.#{config[:PATCH]}.#{config[:BUILD]}"
  rdoc.rdoc_dir = "doc"
  rdoc.title = "#{name} #{version}"
  rdoc.rdoc_files.include("README*")
  rdoc.rdoc_files.include("LICENSE.txt")
  rdoc.rdoc_files.include("bin/*.rb")
  rdoc.rdoc_files.include("lib/**/*.rb")
end


# Rubyforge Release / Publish Tasks ==================================

#about 'Publish website to rubyforge'
task 'publish:rdoc' => 'doc/index.html' do
  #sh "scp -rp doc/* rubyforge.org:/var/www/gforge-projects/#{name}/"
end

#about 'Public release to rubyforge'
task 'publish:gem' => [:package] do |t|
  sh <<-end
    rubyforge add_release -o Any -a CHANGES.txt -f -n README.md #{name} #{name} #{@spec.version} pkg/#{name}-#{@spec.version}.gem &&
    rubyforge add_file -o Any -a CHANGES.txt -f -n README.md #{name} #{name} #{@spec.version} pkg/#{name}-#{@spec.version}.tgz
  end
end

data/VERSION.yml
ADDED
data/bluth.gemspec
ADDED
@@ -0,0 +1,54 @@
# Generated by jeweler
# DO NOT EDIT THIS FILE DIRECTLY
# Instead, edit Jeweler::Tasks in Rakefile, and run the gemspec command
# -*- encoding: utf-8 -*-

Gem::Specification.new do |s|
  s.name = %q{bluth}
  s.version = "0.5.2"

  s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
  s.authors = ["Delano Mandelbaum"]
  s.date = %q{2010-12-10}
  s.description = %q{A Redis queuing system built on top of Familia}
  s.email = %q{delano@solutious.com}
  s.extra_rdoc_files = [
    "LICENSE.txt",
    "README.rdoc"
  ]
  s.files = [
    "CHANGES.txt",
    "LICENSE.txt",
    "README.rdoc",
    "Rakefile",
    "VERSION.yml",
    "bluth.gemspec",
    "lib/bluth.rb",
    "lib/bluth/gob.rb",
    "lib/bluth/worker.rb",
    "lib/daemonizing.rb"
  ]
  s.homepage = %q{http://github.com/delano/bluth}
  s.rdoc_options = ["--charset=UTF-8"]
  s.require_paths = ["lib"]
  s.rubyforge_project = %q{bluth}
  s.rubygems_version = %q{1.3.7}
  s.summary = %q{A Redis queuing system built on top of Familia}

  if s.respond_to? :specification_version then
    current_version = Gem::Specification::CURRENT_SPECIFICATION_VERSION
    s.specification_version = 3

    if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
      s.add_runtime_dependency(%q<familia>, [">= 0.5.3"])
      s.add_runtime_dependency(%q<sysinfo>, [">= 0.7.3"])
    else
      s.add_dependency(%q<familia>, [">= 0.5.3"])
      s.add_dependency(%q<sysinfo>, [">= 0.7.3"])
    end
  else
    s.add_dependency(%q<familia>, [">= 0.5.3"])
    s.add_dependency(%q<sysinfo>, [">= 0.7.3"])
  end
end

data/lib/bluth.rb
ADDED
@@ -0,0 +1,211 @@
# encoding: utf-8
BLUTH_LIB_HOME = File.expand_path File.dirname(__FILE__) unless defined?(BLUTH_LIB_HOME)

require 'sysinfo'
require 'familia'


module Bluth
  module VERSION
    def self.to_s
      load_config
      [@version[:MAJOR], @version[:MINOR], @version[:PATCH]].join('.')
    end
    alias_method :inspect, :to_s
    def self.load_config
      require 'yaml'
      @version ||= YAML.load_file(File.join(BLUTH_LIB_HOME, '..', 'VERSION.yml'))
    end
  end
end

module Bluth
  # A fatal error. Gob fails.
  class Buster < Familia::Problem; end
  # A non-fatal error. Gob succeeds.
  class Maeby < Familia::Problem; end
  # A shutdown request. We burn down the banana stand.
  class Shutdown < Familia::Problem; end

  @db = 15
  @queues = {}
  @poptimeout = 60.seconds
  @handlers = []
  @locks = []
  @sysinfo = nil
  @priority = []
  @scheduler = nil
  class << self
    attr_reader :queues, :handlers, :db, :conf, :locks
    attr_accessor :redis, :uri, :priority, :scheduler, :poptimeout
    def sysinfo
      @sysinfo ||= SysInfo.new.freeze
      @sysinfo
    end
  end

  def Bluth.clear_locks
    @locks.each { |lock|
      Familia.info "Removing lock #{lock}"
      Bluth.redis.del lock
    }
  end

  def Bluth.queue?(n)
    @queues.has_key?(n.to_sym)
  end
  def Bluth.queue(n)
    @queues[n.to_sym]
  end

  def Bluth.conf=(conf={})
    @conf = conf.clone
    @conf[:db] = @db
    connect!
    @conf
  end

  def Bluth.connect!
    @uri = Redis.uri(@conf).freeze
    @redis = Familia.connect @uri
  end

  def Bluth.find_locks
    @locks = Bluth.redis.keys(Familia.key('*', :lock))
  end

  class Queue
    include Familia
    prefix :queue
    def self.rangeraw(count=100)
      gobids = Queue.redis.lrange(key, 0, count-1) || []
    end
    def self.range(count=100)
      gobids = rangeraw count
      gobids.collect { |gobid|
        gob = Gob.from_redis gobid
        next if gob.nil?
        gob.current_queue = self
        gob
      }.compact
    end
    def self.dequeue(gobid)
      Queue.redis.lrem key, 0, gobid
    end
    def self.inherited(obj)
      obj.prefix self.prefix
      obj.suffix obj.to_s.split('::').last.downcase.to_sym
      raise Buster.new("Duplicate queue: #{obj.suffix}") if Bluth.queue?(obj.suffix)
      Bluth.queues[obj.suffix] = obj
      super(obj)
    end
    def self.key(pref=nil,suff=nil)
      Familia.key( pref || prefix, suff || suffix)
    end
    def self.report
      Bluth.queues.keys.collect { |q|
        klass = Bluth.queue(q)
        ("%10s: %4d" % [q, klass.size])
      }.join($/)
    end
    def self.from_string(str)
      raise Buster, "Unknown queue: #{str}" unless Bluth.queue?(str)
      Bluth.queue(str)
    end
    def self.any?
      size > 0
    end

    def self.empty?
      size == 0
    end

    def self.size
      begin
        Queue.redis.llen key
      rescue => ex
        STDERR.puts ex.message, ex.backtrace
        0
      end
    end
    def self.push(gobid)
      Queue.redis.lpush self.key, gobid
    end

    def self.pop
      gobid = Queue.redis.rpoplpush key, Bluth::Running.key
      return if gobid.nil?
      Familia.ld "FOUND gob #{gobid} from #{self.key}"
      gob = Gob.from_redis gobid
      if gob.nil?
        Familia.info "No such gob object: #{gobid}"
        Bluth::Running.dequeue gobid
        return
      end
      gob.current_queue = Bluth::Running
      gob.save
      gob
    end
  end

  # Workers use a blocking pop and will wait for up to
  # Bluth.poptimeout (seconds) before returning nil.
  # Note that the queues are still processed in order.
  # If all queues are empty, the first one to return a
  # value is used. See:
  #
  # http://code.google.com/p/redis/wiki/BlpopCommand
  def Bluth.pop
    #Bluth.priority.each { |queue|
    #  ret = queue.pop
    #  return ret unless ret.nil?
    #}
    begin
      #Familia.ld :BRPOP, Queue.redis, self, caller[1] if Familia.debug?
      order = Bluth.priority.collect { |queue| queue.key }
      order << Bluth.poptimeout # We do it this way to support Ruby 1.8
      gobinfo = Bluth::Queue.redis.brpop *order
      unless gobinfo.nil?
        Familia.info "FOUND #{gobinfo.inspect}" if Familia.debug?
        gob = Gob.from_redis gobinfo[1]
        raise Bluth::Buster, "No such gob object: #{gobinfo[1]}" if gob.nil?
        Bluth::Running.push gob.id
        gob.current_queue = Bluth::Running
        gob.save
      end
    rescue => ex
      if gobinfo.nil?
        Familia.info "ERROR: #{ex.message}"
      else
        Familia.info "ERROR (#{ex.message}); putting #{gobinfo[1]} back on queue"
        Bluth::Orphaned.push gobinfo[1]
      end
    end
    gob
  end

  class Critical < Queue
  end
  class High < Queue
  end
  class Low < Queue
  end
  class Running < Queue
  end
  class Failed < Queue
  end
  class Successful < Queue
  end
  class Scheduled < Queue
  end
  class Orphaned < Queue
  end

  require 'bluth/gob'
  require 'bluth/worker'

  Bluth.priority = [Bluth::Critical, Bluth::High, Bluth::Low]
  Bluth.scheduler = ScheduleWorker

end

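The comment above Bluth.pop describes the consumption model: one BRPOP across the priority queues (Critical, High, Low, in that order), blocking up to Bluth.poptimeout seconds, with the popped job ID parked on the Running queue. A rough consumer-side sketch of that call, assuming a local Redis and the same host/port connection keys as above (the connection options are not defined in this file):

require 'bluth'

Bluth.conf = { :host => '127.0.0.1', :port => 6379 }

# Blocks for up to Bluth.poptimeout seconds across Critical, High, Low.
gob = Bluth.pop
if gob
  gob.perform     # dispatches to the handler class stored in gob.kind
  gob.success!    # moves the job ID from Running to Successful
end
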
data/lib/bluth/gob.rb
ADDED
@@ -0,0 +1,180 @@
module Bluth

  class Gob < Storable
    MAX_ATTEMPTS = 3.freeze unless defined?(Gob::MAX_ATTEMPTS)
    include Familia
    prefix :gob
    ttl 1.hour
    field :id => Gibbler::Digest
    field :kind => String
    field :data => Hash
    field :messages => Array
    field :attempts => Integer
    field :create_time => Float
    field :stime => Float
    field :etime => Float
    field :current_queue => String
    field :thread_id => Integer
    field :cpu => Array
    field :wid => Gibbler::Digest

    def self.inherited(obj)
      obj.extend Bluth::Gob::ClassMethods
      obj.prefix [:job, obj.to_s.split('::').last.downcase].join(':')
      Bluth.handlers << obj
    end

    module ClassMethods
      def clear
        keys.each do |key|
          Gob.redis.del key
        end
      end
      def enqueue(data={},q=nil)
        q ||= self.queue
        job = Gob.create generate_id(data), self, data
        job.current_queue = q
        Familia.ld "ENQUEUING: #{self} #{job.id.short} to #{q}"
        Bluth::Queue.redis.lpush q.key, job.id
        job.create_time = Time.now.utc.to_f
        job.attempts = 0
        job
      end
      def queue(name=nil)
        @queue = name if name
        @queue || Bluth::High
      end
      def generate_id(*args)
        a = [self, Process.pid, Bluth.sysinfo.hostname, Time.now.to_f, *args]
        a.gibbler
      end
      def all
        Bluth::Gob.all.select do |job|
          job.kind == self
        end
      end
      def size
        all.size
      end
      def lock_key
        Familia.key(prefix, :lock)
      end
      def lock!
        raise Bluth::Buster, "#{self} is already locked!" if locked?
        Familia.info "Locking #{self}"
        ret = Bluth::Gob.redis.set lock_key, 1
        Bluth.locks << lock_key
        ret == 'OK'
      end
      def unlock!
        Familia.info "Unlocking #{self}"
        ret = Bluth::Gob.redis.del lock_key
        Bluth.locks.delete lock_key
        ret
      end
      def locked?
        Bluth::Gob.redis.exists lock_key
      end
      def prepare
      end

      [:success, :failure, :running].each do |w|
        define_method "#{w}_key" do # success_key
          Familia.key(self.prefix, w)
        end
        define_method "#{w}!" do |*args| # success!(1)
          by = args.first || 1
          Bluth::Gob.redis.incrby send("#{w}_key"), by
        end
        define_method "#{w}" do # success
          Bluth::Gob.redis.get(send("#{w}_key")).to_i
        end
      end
    end

    def id
      @id = Gibbler::Digest.new(@id) if String === @id
    end
    def clear!
      @attempts = 0
      @messages = []
      save
    end
    def preprocess
      @attempts ||= 0
      @messages ||= []
      @create_time ||= Time.now.utc.to_f
    end
    def attempt?
      attempts < MAX_ATTEMPTS
    end
    def attempt!
      @attempts = attempts + 1
    end
    def current_queue
      @current_queue
    end
    def kind
      @kind = eval "::#{@kind}" rescue @kind if @kind.is_a?(String)
      @kind
    end
    def kind=(v)
      @kind = v
    end
    def perform
      @attempts += 1
      Familia.ld "PERFORM: #{self.to_hash.inspect}"
      @stime = Time.now.utc.to_f
      save # update the time
      self.kind.prepare if self.class.respond_to?(:prepare)
      self.kind.perform @data
      @etime = Time.now.utc.to_f
      save # update the time
    end
    def delayed?
      start = @stime || 0
      start > Time.now.utc.to_f
    end
    def retry!(msg=nil)
      move! Bluth::High, msg
    end
    def failure!(msg=nil)
      @etime = Time.now.utc.to_i
      self.kind.failure!
      move! Bluth::Failed, msg
    end
    def success!(msg=nil)
      @etime = Time.now.utc.to_i
      self.kind.success!
      move! Bluth::Successful, msg
    end
    def duration
      return 0 if @stime.nil?
      et = @etime || Time.now.utc.to_i
      et - @stime
    end
    def dequeue!
      Familia.ld "Deleting #{self.id} from #{current_queue.key}"
      Bluth::Queue.redis.lrem current_queue.key, 0, self.id
    end
    private
    def move!(to, msg=nil)
      @thread_id = $$
      if to.to_s == current_queue.to_s
        raise Bluth::Buster, "Cannot move job to the queue it's in: #{to}"
      end
      Familia.ld "Moving #{self.id.short} from #{current_queue.key} to #{to.key}"
      @messages << msg unless msg.nil? || msg.empty?
      # We push first to make sure we never lose a Gob ID. Instead
      # there's the small chance of a job ID being in two queues.
      Bluth::Queue.redis.lpush to.key, @id
      dequeue!
      save # update messages
      @current_queue = to
    end
  end

end

ADDED
@@ -0,0 +1,356 @@
require 'eventmachine'
require 'rufus/scheduler'
require 'daemonizing'
require 'timeout'

module Bluth
  @salt = rand.gibbler.shorten(10).freeze
  class << self
    attr_reader :salt
  end

  module WorkerBase

    def id
      @id ||= [host, user, rand, Time.now].gibbler.short
    end

    def longid
      [host, user, id].join('-')
    end

    # Used by daemonize as the process name (linux only)
    def name
      "bs-#{self.class.prefix}-#{id}"
    end

    def key(suffix=nil)
      self.class.key longid, suffix
    end

    def initialize
      @host, @user = Bluth.sysinfo.hostname, Bluth.sysinfo.user
      @pid_file ||= "/tmp/#{self.class.prefix}-#{id}.pid"
      @log_file ||= "/tmp/#{self.class.prefix}-#{id}.log"
      @success, @failure, @problem = 0, 0, 0
    end

    def current_job
      Gibbler::Digest.new(@current_job || '')
    end

    def kill(force=false)
      if force || host == Bluth.sysinfo.hostname
        STDERR.puts "Destroying #{self.index} (this machine is: #{Bluth.sysinfo.hostname}; worker is: #{host})"
        Worker.kill self.pid_file if File.exists?(self.pid_file) rescue Errno::ESRCH
        File.delete self.log_file if File.exists?(self.log_file)
        destroy!
      else
        STDERR.puts "Worker #{self.index} not running on #{Bluth.sysinfo.hostname}"
      end
    end

    def working! gobid
      @current_job = gobid
      update_time
      save
    end

    def self.included(obj)
      obj.extend WorkerBase::ClassMethods
    end

    module ClassMethods
      def from_redis(wid)
        me = new
        me.id = wid
        super(me.longid)
      end

      def run!(*args)
        me = new
        Familia.info "Created: #{me.key}"
        me.run!
        me
      end

      def run(*args)
        me = new
        Familia.info "Created: #{me.key}"
        me.run
        me
      end

      def kill(pid_file)
        pid = read_pid_file pid_file
        super(pid_file, 10)
      end

    end

  end

  class Worker < Storable
    include WorkerBase
    @interval = 2.seconds
    class << self
      attr_accessor :interval
    end
    include Familia
    include Logging
    include Daemonizable
    prefix :worker
    index :id
    field :host
    field :user
    field :id
    field :process_id => Integer
    field :pid_file
    field :log_file
    field :current_job
    field :success => Integer
    field :failure => Integer
    field :problem => Integer
    include Familia::Stamps
    def success!
      @success += 1
      @current_job = ""
      update_time
      save
    end
    def failure!
      @failure += 1
      @current_job = ""
      update_time
      save
    end
    def problem!
      @problem += 1
      @current_job = ""
      update_time
      save
    end

    def run!
      begin
        find_gob
      rescue => ex
        msg = "#{ex.class}: #{ex.message}"
        STDERR.puts msg
        Familia.ld :EXCEPTION, msg, caller[1] if Familia.debug?
        destroy!
      rescue Interrupt => ex
        puts $/, "Exiting..."
        destroy!
      end
    end

    def run
      begin
        @process_id = $$
        save

        scheduler = Rufus::Scheduler.start_new
        Familia.info "Setting interval: #{Worker.interval} sec (poptimeout: #{Bluth.poptimeout})"
        Familia.reconnect_all! # Need to reconnect after daemonize
        ## TODO: Works but needs to restart scheduler
        ##Signal.trap("USR1") do
        ##  Worker.interval += 1
        ##  Familia.info "Setting interval: #{Worker.interval} sec"
        ##end
        ##Signal.trap("USR2") do
        ##  Worker.interval -= 1
        ##  Familia.info "Setting interval: #{Worker.interval}"
        ##end
        scheduler.every Worker.interval, :blocking => true do |task|
          Familia.ld "#{$$} TICK @ #{Time.now.utc}"
          sleep rand
          find_gob task
        end
        scheduler.join

      rescue => ex
        msg = "#{ex.class}: #{ex.message}"
        STDERR.puts msg
        Familia.ld :EXCEPTION, msg, caller[1] if Familia.debug?
        destroy!
      rescue Interrupt => ex
        puts <<-EOS.gsub(/(?:^|\n)\s*/, "\n")
          Exiting...
          (You may need to wait up to #{Bluth.poptimeout} seconds
          for this worker to exit cleanly.)
        EOS
        # We reconnect to the queue in case we're currently
        # waiting on a brpop (blocking pop) timeout.
        destroy!
      end

    end


    private
    require 'benchmark'
    # DO NOT return from this method
    def find_gob(task=nil)
      begin
        job = Bluth.pop
        unless job.nil?
          job.wid = self.id
          if job.delayed?
            job.attempts = 0
            job.retry!
          elsif !job.attempt?
            job.failure! "Too many attempts"
          else
            job.stime = Time.now.utc.to_i
            self.working! job.id
            tms = Benchmark.measure do
              job.perform
            end
            job.cpu = [tms.utime.fineround(3),tms.stime.fineround(3),tms.real.fineround(3)]
            job.save
            job.success!
            self.success!
          end
        end
      rescue Bluth::Shutdown => ex
        msg = "Shutdown requested: #{ex.message}"
        job.success! msg
        Familia.info msg
        task.unschedule
        destroy!
        exit
      rescue Bluth::Maeby => ex
        Familia.info ex.message
        job.success! ex.message
        self.success!
      rescue Bluth::Buster => ex
        Familia.info ex.message
        job.failure! ex.message
        self.failure!
      rescue => ex
        Familia.info ex.message
        Familia.info ex.backtrace
        job.retry! "#{ex.class}: #{ex.message}" if job
        problem!
        #if problem > 5
        #  ## TODO: SEND EMAIL
        #  task.unschedule unless task.nil? # Kill this worker b/c something is clearly wrong
        #  destroy!
        #  EM.stop
        #  exit 1
        #end
      end
    end

  end

  class ScheduleWorker < Storable
    include WorkerBase
    @interval = 20
    @timeout = 60 #not working
    class << self
      attr_accessor :interval, :timeout
      def interval(v=nil)
        @interval = v unless v.nil?
        @interval
      end
    end
    include Familia
    include Logging
    include Daemonizable
    prefix :scheduler
    index :id
    field :host
    field :user
    field :id
    field :process_id => Integer
    field :pid_file
    field :log_file
    field :scheduled => Integer
    field :monitored => Integer
    field :timeouts => Integer
    include Familia::Stamps
    attr_reader :schedule
    attr_reader :monitors

    def scheduled!(count=1)
      @scheduled ||= 0
      @scheduled += count
      update_time
      save
    end
    def monitored!(count=1)
      @monitored ||= 0
      @monitored += count
      update_time
      save
    end
    def timeout!(count=1)
      @timeouts ||= 0
      @timeouts += count
      update_time
      save
    end
    def run!
      run
    end
    def run
      begin
        raise Familia::Problem, "Only 1 scheduler at a time" if ScheduleWorker.any?

        EM.run {
          @process_id = $$
          srand(Bluth.salt.to_i(16) ** @process_id)
          @schedule = Rufus::Scheduler::EmScheduler.start_new
          save # persist and make note the scheduler is running
          prepare
          @schedule.every self.class.interval, :tags => :keeper do |keeper_task|
            begin
              scheduled_work(keeper_task)
            rescue => ex
              msg = "#{ex.class}: #{ex.message}"
              STDERR.puts msg
              STDERR.puts ex.backtrace
              Familia.ld :EXCEPTION, msg, caller[1] if Familia.debug?
            end
            sleep rand # prevent thrashing
          end
        }
      rescue => ex
        msg = "#{ex.class}: #{ex.message}"
        puts msg
        STDERR.puts ex.backtrace
        Familia.ld :EXCEPTION, msg, caller[1] if Familia.debug?
        destroy!
      rescue Interrupt => ex
        puts $/, "Exiting..."
        destroy!
      end
    end

    protected

    def prepare
    end

    def scheduled_work(keeper)
      STDOUT.puts "Come on!"
    end

  end

end

class Rufus::Scheduler::SchedulerCore
  # See lib/rufus/sc/scheduler.rb
  def handle_exception(job, exception)
    case exception
    when SystemExit
      exit
    else
      super
    end
  end
end

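Worker#run wires a Rufus scheduler tick to find_gob, which pops one job per tick, benchmarks Gob#perform, and records success/failure counters on the worker record. The gem ships no bin/ script, so starting a worker presumably looks something like the sketch below; the daemonize call comes from lib/daemonizing.rb and the connection options are assumptions about intended use, not documented behavior.

require 'bluth'

Bluth.conf = { :host => '127.0.0.1', :port => 6379 }

worker = Bluth::Worker.new
worker.daemonize   # detach, write /tmp/worker-<id>.pid, log to /tmp/worker-<id>.log
worker.run         # poll the queues every Worker.interval seconds until interrupted
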
data/lib/daemonizing.rb
ADDED
@@ -0,0 +1,221 @@
require 'etc'
require 'daemons'

module Process
  # Returns +true+ if the process identified by +pid+ is running.
  def running?(pid)
    Process.getpgid(pid) != -1
  rescue Errno::ESRCH
    false
  end
  module_function :running?
end

# Raised when the pid file already exists when starting as a daemon.
class PidFileExist < RuntimeError; end

# Module included in classes that can be turned into a daemon.
# Handles stuff like:
# * storing the PID in a file
# * redirecting output to the log file
# * changing process privileges
# * killing the process gracefully
module Daemonizable
  attr_accessor :pid_file, :log_file

  def self.included(base)
    base.extend ClassMethods
  end

  def pid
    File.exist?(pid_file) ? open(pid_file).read.to_i : nil
  end

  # Turns the current script into a daemon process that detaches from the console.
  def daemonize
    raise ArgumentError, 'You must specify a pid_file to daemonize' unless @pid_file

    remove_stale_pid_file

    pwd = Dir.pwd # Current directory is changed during daemonization, so store it

    # HACK we need to create the directory before daemonization to prevent a bug under 1.9
    # ignoring all signals when the directory is created after daemonization.
    FileUtils.mkdir_p File.dirname(@pid_file)

    Daemonize.daemonize(File.expand_path(@log_file), name)

    Dir.chdir(pwd)

    write_pid_file

    at_exit do
      log ">> Exiting!"
      remove_pid_file
    end
  end

  # Change privileges of the process
  # to the specified user and group.
  def change_privilege(user, group=user)
    log ">> Changing process privilege to #{user}:#{group}"

    uid, gid = Process.euid, Process.egid
    target_uid = Etc.getpwnam(user).uid
    target_gid = Etc.getgrnam(group).gid

    if uid != target_uid || gid != target_gid
      # Change process ownership
      Process.initgroups(user, target_gid)
      Process::GID.change_privilege(target_gid)
      Process::UID.change_privilege(target_uid)
    end
  rescue Errno::EPERM => e
    log "Couldn't change user and group to #{user}:#{group}: #{e}"
  end

  # Register a proc to be called to restart the server.
  def on_restart(&block)
    @on_restart = block
  end

  # Restart the server.
  def restart
    if @on_restart
      log '>> Restarting ...'
      stop
      remove_pid_file
      @on_restart.call
      exit!
    end
  end

  module ClassMethods
    # Send a QUIT or INT (if timeout is +0+) signal to the process whose
    # PID is stored in +pid_file+.
    # If the process is still running after +timeout+, a KILL signal is
    # sent.
    def kill(pid_file, timeout=60)
      if timeout == 0
        send_signal('INT', pid_file, timeout)
      else
        send_signal('QUIT', pid_file, timeout)
      end
    end

    # Restart the server by sending HUP signal.
    def restart(pid_file)
      send_signal('HUP', pid_file)
    end

    # Send a +signal+ to the process whose PID is stored in +pid_file+.
    def send_signal(signal, pid_file, timeout=60)
      if pid = read_pid_file(pid_file)
        Logging.log "Sending #{signal} signal to process #{pid} ... "
        Process.kill(signal, pid)
        Timeout.timeout(timeout) do
          sleep 0.1 while Process.running?(pid)
        end
      else
        Logging.log "Can't stop process, no PID found in #{pid_file}"
      end
    rescue Timeout::Error
      Logging.log "Timeout!"
      force_kill pid_file
    rescue Interrupt
      force_kill pid_file
    rescue Errno::ESRCH # No such process
      Logging.log "process not found!"
      force_kill pid_file
    end

    def force_kill(pid_file)
      if pid = read_pid_file(pid_file)
        Logging.log "Sending KILL signal to process #{pid} ... "
        Process.kill("KILL", pid)
        File.delete(pid_file) if File.exist?(pid_file)
      else
        Logging.log "Can't stop process, no PID found in #{pid_file}"
      end
    end

    def read_pid_file(file)
      if File.file?(file) && pid = File.read(file)
        pid.to_i
      else
        nil
      end
    end
  end

  protected
    def remove_pid_file
      File.delete(@pid_file) if @pid_file && File.exists?(@pid_file)
    end

    def write_pid_file
      log ">> Writing PID to #{@pid_file}"
      open(@pid_file,"w") { |f| f.write(Process.pid) }
      File.chmod(0644, @pid_file)
    end

    # If PID file is stale, remove it.
    def remove_stale_pid_file
      if File.exist?(@pid_file)
        if pid && Process.running?(pid)
          raise PidFileExist, "#{@pid_file} already exists, seems like it's already running (process ID: #{pid}). " +
                              "Stop the process or delete #{@pid_file}."
        else
          log ">> Deleting stale PID file #{@pid_file}"
          remove_pid_file
        end
      end
    end
end


module Logging
  class << self
    attr_writer :trace, :debug, :silent

    def trace?; !@silent && @trace end
    def debug?; !@silent && @debug end
    def silent?; @silent end
  end

  # Global silencer methods
  def silent
    Logging.silent?
  end
  def silent=(value)
    Logging.silent = value
  end

  # Log a message to the console
  def log(msg)
    puts msg unless Logging.silent?
  end
  module_function :log
  public :log

  # Log a message to the console if tracing is activated
  def trace(msg=nil)
    log msg || yield if Logging.trace?
  end
  module_function :trace
  public :trace

  # Log a message to the console if debugging is activated
  def debug(msg=nil)
    log msg || yield if Logging.debug?
  end
  module_function :debug
  public :debug

  # Log an error backtrace if debugging is activated
  def log_error(e=$!)
    debug "#{e}\n\t" + e.backtrace.join("\n\t")
  end
  module_function :log_error
  public :log_error
end

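Daemonizable expects the including class to provide a name method and to set pid_file and log_file before calling daemonize; the class-side kill reads the PID file and signals the process. A standalone sketch of that contract; the MyDaemon class, its paths, and its work loop are hypothetical and only illustrate how Worker and ScheduleWorker use the module.

class MyDaemon
  include Logging
  include Daemonizable

  def name
    'my-daemon'   # used by Daemonize.daemonize as the process name
  end

  def start
    self.pid_file = '/tmp/my-daemon.pid'
    self.log_file = '/tmp/my-daemon.log'
    daemonize            # fork, detach, redirect output, write the PID file
    loop { sleep 1 }     # real work goes here
  end
end

# From a controlling process:
# MyDaemon.kill('/tmp/my-daemon.pid')   # QUIT first, KILL if it outlives the timeout
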
metadata
ADDED
@@ -0,0 +1,108 @@
--- !ruby/object:Gem::Specification
name: bluth
version: !ruby/object:Gem::Version
  hash: 15
  prerelease: false
  segments:
  - 0
  - 5
  - 2
  version: 0.5.2
platform: ruby
authors:
- Delano Mandelbaum
autorequire:
bindir: bin
cert_chain: []

date: 2010-12-10 00:00:00 -05:00
default_executable:
dependencies:
- !ruby/object:Gem::Dependency
  name: familia
  prerelease: false
  requirement: &id001 !ruby/object:Gem::Requirement
    none: false
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        hash: 13
        segments:
        - 0
        - 5
        - 3
        version: 0.5.3
  type: :runtime
  version_requirements: *id001
- !ruby/object:Gem::Dependency
  name: sysinfo
  prerelease: false
  requirement: &id002 !ruby/object:Gem::Requirement
    none: false
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        hash: 5
        segments:
        - 0
        - 7
        - 3
        version: 0.7.3
  type: :runtime
  version_requirements: *id002
description: A Redis queuing system built on top of Familia
email: delano@solutious.com
executables: []

extensions: []

extra_rdoc_files:
- LICENSE.txt
- README.rdoc
files:
- CHANGES.txt
- LICENSE.txt
- README.rdoc
- Rakefile
- VERSION.yml
- bluth.gemspec
- lib/bluth.rb
- lib/bluth/gob.rb
- lib/bluth/worker.rb
- lib/daemonizing.rb
has_rdoc: true
homepage: http://github.com/delano/bluth
licenses: []

post_install_message:
rdoc_options:
- --charset=UTF-8
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  none: false
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      hash: 3
      segments:
      - 0
      version: "0"
required_rubygems_version: !ruby/object:Gem::Requirement
  none: false
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      hash: 3
      segments:
      - 0
      version: "0"
requirements: []

rubyforge_project: bluth
rubygems_version: 1.3.7
signing_key:
specification_version: 3
summary: A Redis queuing system built on top of Familia
test_files: []