threadz 0.1.0
- data/.gitignore +7 -0
- data/CHANGELOG +3 -0
- data/MIT-LICENSE +21 -0
- data/README.rdoc +49 -0
- data/Rakefile +126 -0
- data/VERSION +1 -0
- data/lib/threadz.rb +23 -0
- data/lib/threadz/atomic_integer.rb +28 -0
- data/lib/threadz/batch.rb +121 -0
- data/lib/threadz/directive.rb +7 -0
- data/lib/threadz/sleeper.rb +49 -0
- data/lib/threadz/thread_pool.rb +118 -0
- data/spec/atomic_integer_spec.rb +47 -0
- data/spec/spec_helper.rb +2 -0
- data/spec/threadz_spec.rb +253 -0
- data/threadz.gemspec +56 -0
- metadata +72 -0
data/.gitignore
ADDED
data/CHANGELOG
ADDED
data/MIT-LICENSE
ADDED
@@ -0,0 +1,21 @@
+Copyright (c) 2009 Max Aller <nanodeath@gmail.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
data/README.rdoc
ADDED
@@ -0,0 +1,49 @@
+= Threadz Thread Pool Library
+
+== Description
+
+This is a thread pool library that you can do two main things with, which I'll demonstrate in code:
+
+  # These are more for "fire and forget" tasks
+  T1 = Threadz::ThreadPool.new
+  T1.process { puts "my first task" }
+  T1.process { puts "my second task" }
+
+  # If you care when the tasks complete, use batches
+  T2 = Threadz::ThreadPool.new
+  b = T2.new_batch
+  b << lambda { puts "my first task" }
+  b << lambda { puts "my second task" }
+
+  puts "do a couple of other things..."
+
+  b.wait_until_done
+
+  # You can do other things, too
+
+  T3 = Threadz::ThreadPool.new
+  b = T3.new_batch
+  b << lambda { puts "my first task" }
+  b << lambda { puts "my second task" }
+
+  puts "do a couple of other things..."
+
+  b.when_done { puts "woohoo, done with tasks" }
+
+  puts "and some other stuff, blah"
+
+  b = T3.new_batch
+  b << lambda { 10000000.times {} }
+
+  b.wait_until_done(:timeout => 0.1)
+  puts b.completed? ? "finished!" : "didn't finish"
+
+The thread pool is also smart -- depending on load, it can either spawn or cull additional threads (at a rate you can set).
+
+== Examples
+
+For examples, please see the well-documented specs. They're all fairly simple and straightforward. Please message me if they're not.
+
+== Disclaimer
+
+Consider this product in late alpha. There are still some bugs to be worked out and the API may change.
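The "rate you can set" above corresponds to the ThreadPool constructor options documented further down in lib/threadz/thread_pool.rb (:initial_size, :maximum_size, :kill_threshold). A minimal, hypothetical tuning sketch using only those documented options (the sleeping lambda is a stand-in for real work):

  require 'threadz'

  # Start small, allow growth to 10 threads; a lower kill_threshold makes the
  # watch thread react faster to sustained load or idleness.
  pool = Threadz::ThreadPool.new(:initial_size => 2, :maximum_size => 10, :kill_threshold => 5)

  batch = pool.new_batch
  batch << lambda { sleep 1 }            # stand-in for real work
  batch.wait_until_done(:timeout => 2)   # give up waiting after ~2 seconds
  puts batch.completed? ? "finished!" : "didn't finish"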
data/Rakefile
ADDED
@@ -0,0 +1,126 @@
+# Adapted from the rake Rakefile.
+
+require 'rubygems'
+require 'rake/testtask'
+require 'rake/rdoctask'
+require 'rake/gempackagetask'
+require 'rubygems/source_info_cache'
+require 'spec/rake/spectask'
+
+
+spec = Gem::Specification.load(File.join(File.dirname(__FILE__), 'threadz.gemspec'))
+
+desc "Default Task"
+task 'default' => ['spec', 'rdoc']
+
+
+desc "If you're building from sources, run this task first to setup the necessary dependencies"
+task 'setup' do
+  windows = Config::CONFIG['host_os'] =~ /windows|cygwin|bccwin|cygwin|djgpp|mingw|mswin|wince/i
+  rb_bin = File.expand_path(Config::CONFIG['ruby_install_name'], Config::CONFIG['bindir'])
+  spec.dependencies.select { |dep| Gem::SourceIndex.from_installed_gems.search(dep).empty? }.each do |missing|
+    dep = Gem::Dependency.new(missing.name, missing.version_requirements)
+    spec = Gem::SourceInfoCache.search(dep, true, true).last
+    fail "#{dep} not found in local or remote repository!" unless spec
+    puts "Installing #{spec.full_name} ..."
+    args = [rb_bin, '-S', 'gem', 'install', spec.name, '-v', spec.version.to_s]
+    args.unshift('sudo') unless windows || ENV['GEM_HOME']
+    sh args.map{ |a| a.inspect }.join(' ')
+  end
+end
+
+
+desc "Run all test cases"
+task 'spec' do |task|
+  exec 'spec -c spec/*.rb'
+end
+
+desc "Run all test cases 10 times (or n times)"
+task 'spec-stress', [:times] do |task, args|
+  args.with_defaults :times => 10
+  puts "Executing spec #{args.times} times"
+  puts Rake::Task[:spec].methods.sort.inspect
+  args.times.times do
+    Rake::Task[:spec].execute
+    puts "foo"
+  end
+  puts "Done!"
+end
+
+# Create the documentation.
+Rake::RDocTask.new do |rdoc|
+  rdoc.main = 'README.rdoc'
+  rdoc.rdoc_files.include('README.rdoc', 'lib/**/*.rb')
+  rdoc.title = "Threadz Thread Pool"
+  rdoc.rdoc_dir = 'doc'
+end
+
+
+gem = Rake::GemPackageTask.new(spec) do |pkg|
+  pkg.need_tar = true
+  pkg.need_zip = true
+end
+
+desc "Install the package locally"
+task 'install'=>['setup', 'package'] do |task|
+  rb_bin = File.expand_path(Config::CONFIG['ruby_install_name'], Config::CONFIG['bindir'])
+  args = [rb_bin, '-S', 'gem', 'install', "pkg/#{spec.name}-#{spec.version}.gem"]
+  windows = Config::CONFIG['host_os'] =~ /windows|cygwin|bccwin|cygwin|djgpp|mingw|mswin|wince/i
+  args.unshift('sudo') unless windows || ENV['GEM_HOME']
+  sh args.map{ |a| a.inspect }.join(' ')
+end
+
+desc "Uninstall previously installed packaged"
+task 'uninstall' do |task|
+  rb_bin = File.expand_path(Config::CONFIG['ruby_install_name'], Config::CONFIG['bindir'])
+  args = [rb_bin, '-S', 'gem', 'install', spec.name, '-v', spec.version.to_s]
+  windows = Config::CONFIG['host_os'] =~ /windows|cygwin|bccwin|cygwin|djgpp|mingw|mswin|wince/i
+  args.unshift('sudo') unless windows || ENV['GEM_HOME']
+  sh args.map{ |a| a.inspect }.join(' ')
+end
+
+
+task 'release'=>['setup', 'test', 'package'] do
+
+  require 'rubyforge'
+  changes = File.read('CHANGELOG')[/\d+.\d+.\d+.*\n((:?^[^\n]+\n)*)/]
+  File.open '.changes', 'w' do |file|
+    file.write changes
+  end
+
+  puts "Uploading #{spec.name} #{spec.version}"
+  files = Dir['pkg/*.{gem,tgz,zip}']
+  rubyforge = RubyForge.new
+  rubyforge.configure
+  rubyforge.login
+  rubyforge.userconfig.merge! 'release_changes'=>'.changes', 'preformatted'=>true
+  rubyforge.add_release spec.rubyforge_project.downcase, spec.name.downcase, spec.version.to_s, *files
+  rm_f '.changes'
+  puts "Release #{spec.version} uploaded"
+end
+
+task 'clobber' do
+  rm_f '.changes'
+end
+
+desc "Run all examples with RCov"
+Spec::Rake::SpecTask.new('spec:rcov') do |t|
+  t.spec_files = FileList['spec/**/*.rb']
+  t.rcov = true
+  t.rcov_opts = ['--exclude', 'spec']
+end
+
+begin
+  require 'jeweler'
+  Jeweler::Tasks.new do |gemspec|
+    gemspec.name = "threadz"
+    gemspec.summary = "A Ruby threadpool library to handle threadpools and make batch jobs easier."
+    #gemspec.description = "Longer description?"
+    gemspec.email = "nanodeath@gmail.com"
+    gemspec.homepage = "http://github.com/nanodeath/threadz"
+    gemspec.authors = ["Max Aller"]
+  end
+  Jeweler::GemcutterTasks.new
+rescue LoadError
+  puts "Jeweler not available. Install it with: sudo gem install jeweler"
+end
data/VERSION
ADDED
@@ -0,0 +1 @@
+0.1.0
data/lib/threadz.rb
ADDED
@@ -0,0 +1,23 @@
+# Threadz is a library that makes it easier to queue up batches of jobs and
+# execute them as the developer pleases. With Threadz, it's also easier to
+# wait on that batch completing: i.e. fire off 5 jobs at the same time and then
+# wait until they're all finished. Of course, this is also a threadpool: the
+# number of threads available for scheduling can scale up and down as load
+# requires.
+#
+# Author:: Max Aller (mailto: nanodeath@gmail.com)
+# Copyright:: Copyright (c) 2009
+# License:: Distributed under the MIT License
+
+# Example:
+#   T = ThreadPool.new
+#   b = T.new_batch
+#   b << lambda { puts "foo" },
+#   b << lambda { puts "bar" },
+#   b << [ lambda { puts "can" }, lamba { puts "monkey" }]
+#   b.wait_until_done
+
+require 'thread'
+
+['atomic_integer', 'sleeper', 'directive', 'batch', 'thread_pool'].each { |lib| require File.join(File.dirname(__FILE__), 'threadz', lib) }
+
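Note the example in that header comment has a couple of typos (stray trailing commas and "lamba" for "lambda"); a corrected sketch of the same usage, with the class referenced through its module, would look like:

  T = Threadz::ThreadPool.new
  b = T.new_batch
  b << lambda { puts "foo" }
  b << lambda { puts "bar" }
  b << [lambda { puts "can" }, lambda { puts "monkey" }]
  b.wait_until_done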
data/lib/threadz/atomic_integer.rb
ADDED
@@ -0,0 +1,28 @@
+require 'thread'
+
+module Threadz
+  class AtomicInteger
+    def initialize(value)
+      @value = value
+      @mutex = Mutex.new
+    end
+
+    def value
+      @value
+    end
+
+    def increment(amount=1)
+      # We could use Mutex#synchronize here, but compared to modifying an
+      # integer, creating a block is crazy expensive
+      @mutex.lock
+      @value += amount
+      @mutex.unlock
+    end
+
+    def decrement(amount=1)
+      @mutex.lock
+      @value -= amount
+      @mutex.unlock
+    end
+  end
+end
data/lib/threadz/batch.rb
ADDED
@@ -0,0 +1,121 @@
+['atomic_integer', 'sleeper'].each { |lib| require File.join(File.dirname(__FILE__), lib) }
+
+module Threadz
+  # A batch is a collection of jobs you care about that gets pushed off to
+  # the attached thread pool. The calling thread can be signaled when the
+  # batch has completed executing, or a block can be executed.
+  class Batch
+    # Creates a new batch attached to the given threadpool. A number of options
+    # are available:
+    # +:latent+:: If latent, none of the jobs in the batch will actually start
+    #             executing until the +start+ method is called.
+    def initialize(threadpool, opts={})
+      @threadpool = threadpool
+      @waiting_threads = []
+      @job_lock = Mutex.new
+      @jobs_count = AtomicInteger.new(0)
+      @when_done_blocks = []
+      @sleeper = ::Threadz::Sleeper.new
+
+      ## Options
+
+      #latent
+      @latent = opts.key?(:latent) ? opts[:latent] : false
+      if(@latent)
+        @started = false
+      else
+        @started = true
+      end
+      @job_queue = Queue.new if @latent
+    end
+
+    # Add a new job to the batch. If this is a latent batch, the job can't
+    # be scheduled until the batch is #start'ed; otherwise it may start
+    # immediately. The job can be anything that responds to +call+ or an
+    # array of objects that respond to +call+.
+    def push(job)
+      if job.is_a? Array
+        job.each {|j| self << j}
+      elsif job.respond_to? :call
+        @jobs_count.increment
+        if @latent && !@started
+          @job_queue << job
+        else
+          send_to_threadpool job
+        end
+      else
+        raise "Not a valid job: needs to support #call"
+      end
+    end
+
+    alias << push
+
+    # Put the current thread to sleep until the batch is done processing.
+    # There are options available:
+    # +:timeout+:: If specified, will only wait for at least this many seconds
+    #              for the batch to finish. Typically used with #completed?
+    def wait_until_done(opts={})
+      return if completed?
+
+      raise "Threadz: thread deadlocked because batch job was never started" if @latent && !@started
+
+      timeout = opts.key?(:timeout) ? opts[:timeout] : 0
+      #raise "Timeout not supported at the moment" if timeout
+
+      @sleeper.wait(timeout)
+    end
+
+    # Returns true iff there are no unfinished jobs in the queue.
+    def completed?
+      return @jobs_count.value == 0
+    end
+
+    # If this is a latent batch, start processing all of the jobs in the queue.
+    def start
+      Thread.exclusive do # in case another thread tries to push new jobs onto the queue while we're starting
+        if @latent
+          @started = true
+          until @job_queue.empty?
+            send_to_threadpool @job_queue.pop
+          end
+          return true
+        else
+          return false
+        end
+      end
+    end
+
+    # Execute a given block when the batch has finished processing. If the batch
+    # has already finished executing, execute immediately.
+    def when_done(&block)
+      @job_lock.lock
+      if completed?
+        block.call
+      else
+        @when_done_blocks << block
+      end
+      @job_lock.unlock
+    end
+
+    private
+    def handle_done
+      @sleeper.broadcast
+      @when_done_blocks.each do |b|
+        b.call
+      end
+      @when_done_blocks = []
+    end
+
+    def send_to_threadpool(job)
+      @threadpool.process do
+        job.call
+        # Lock in case we get two threads at the "fork in the road" at the same time
+        @job_lock.lock
+        @jobs_count.decrement
+        # fork in the road
+        handle_done if completed?
+        @job_lock.unlock
+      end
+    end
+  end
+end
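Pulling the Batch options above together: a :latent batch queues jobs without scheduling them until #start is called, and #when_done registers a callback (run immediately if the batch has already finished). A short sketch of that flow, with placeholder sleep jobs standing in for real work:

  pool  = Threadz::ThreadPool.new
  batch = pool.new_batch(:latent => true)

  batch << lambda { sleep 0.1 }                          # single job
  batch << [lambda { sleep 0.1 }, lambda { sleep 0.1 }]  # array of jobs
  batch.when_done { puts "all jobs finished" }

  batch.start
  batch.wait_until_done(:timeout => 5)   # returns after ~5s even if jobs are still running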
data/lib/threadz/sleeper.rb
ADDED
@@ -0,0 +1,49 @@
+require 'thread'
+require 'timeout'
+
+module Threadz
+  class Sleeper
+    def initialize
+      @waiters = Queue.new
+    end
+
+    def wait(timeout=0)
+      if(timeout == nil || timeout <= 0)
+        @waiters << Thread.current
+        Thread.stop
+        return true
+      else
+        begin
+          @waiters << Thread.current
+          status = Timeout::timeout(timeout) {
+            Thread.current[:'__THREADZ_IS_SLEEPING'] = true
+            Thread.stop
+            Thread.current[:'__THREADZ_IS_SLEEPING'] = false
+          }
+          return true
+        rescue Timeout::Error
+          return false
+        end
+      end
+    end
+
+    def signal
+      begin
+        begin
+          waiter = @waiters.pop(true)
+        rescue ThreadError => e
+        end
+      end while waiter[:'__THREADZ_IS_SLEEPING']
+      waiter.wakeup if waiter
+    end
+
+    def broadcast
+      while !@waiters.empty?
+        begin
+          @waiters.pop(true).wakeup
+        rescue ThreadError => e
+        end
+      end
+    end
+  end
+end
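Sleeper is the blocking primitive Batch relies on: #wait parks the calling thread (optionally bounded by a timeout, returning false if it expires), while #signal and #broadcast wake one or all parked threads. A rough standalone sketch:

  sleeper = Threadz::Sleeper.new

  waiter = Thread.new do
    woken = sleeper.wait(2)   # false if the 2-second timeout fires first
    puts woken ? "woken up" : "timed out"
  end

  sleep 0.1           # give the waiter a moment to park itself
  sleeper.broadcast   # wake everything currently waiting
  waiter.join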
data/lib/threadz/thread_pool.rb
ADDED
@@ -0,0 +1,118 @@
+require 'thread'
+
+module Threadz
+
+  # The ThreadPool class contains all the threads available to whatever context
+  # has access to it.
+  class ThreadPool
+    # Default setting for kill threshold
+    KILL_THRESHOLD = 10
+    # Setting for how much to decrement current kill score by for each queued job
+    THREADS_BUSY_SCORE = 1
+    # Setting for how much to increment current kill score by for *each* idle thread
+    THREADS_IDLE_SCORE = 1
+
+    # Creates a new thread pool into which you can queue jobs.
+    # There are a number of options:
+    # :initial_size:: The number of threads you start out with initially. Also, the minimum number of threads.
+    #                 By default, this is 10.
+    # :maximum_size:: The highest number of threads that can be allocated. By default, this is the minimum size x 5.
+    # :kill_threshold:: Constant that determines when new threads are needed or when threads can be killed off.
+    #                   If the internally tracked kill score falls to positive kill_threshold, then a thread is killed off and the
+    #                   kill score is reset. If the kill score rises to negative kill_threshold, then a new thread
+    #                   is created and the kill score is reset. Every 0.1 seconds, the state of all threads in the
+    #                   pool is checked. If there is more than one idle thread (and we're above minimum size), the
+    #                   kill score is incremented by THREADS_IDLE_SCORE for each idle thread. If there are no idle threads
+    #                   (and we're below maximum size) the kill score is decremented by THREADS_KILL_SCORE for each queued job.
+    def initialize(opts={})
+      @min_size = opts[:initial_size] || 10 # documented
+      @max_size = opts[:maximum_size] || @min_size * 5 # documented
+
+      # This is our main queue for jobs
+      @queue = Queue.new
+      @worker_threads_count = AtomicInteger.new(0)
+      @min_size.times { spawn_thread }
+      @killscore = 0
+      @killthreshold = opts[:kill_threshold] || KILL_THRESHOLD # documented
+
+      spawn_watch_thread
+    end
+
+    def thread_count
+      @worker_threads_count.value
+    end
+
+    # Push a process onto the job queue for the thread pool to pick up.
+    # Note that using this method, you can't keep track of when the job
+    # finishes. If you care about when it finishes, use batches.
+    def process(&block)
+      @queue << block
+      nil
+    end
+
+    # Return a new batch that's attached into this thread pool. See Threadz::ThreadPool::Batch
+    # for documention on opts.
+    def new_batch(opts={})
+      return Batch.new(self, opts)
+    end
+
+    private
+
+    # Spin up a new thread
+    def spawn_thread
+      Thread.new do
+        while true
+          x = @queue.shift
+          if x == Directive::SUICIDE_PILL
+            @worker_threads_count.decrement
+            Thread.current.terminate
+          end
+          Thread.pass
+          begin
+            x.call
+          rescue StandardError => e
+            $stderr.puts "Threadz: Error in thread, but restarting with next job: #{e.inspect}\n#{e.backtrace.join("\n")}"
+          end
+        end
+      end
+      @worker_threads_count.increment
+    end
+
+    # Kill a thread after it completes its current job
+    def kill_thread
+      @queue.unshift(Directive::SUICIDE_PILL)
+    end
+
+    # This thread watches over the pool and allocated and deallocates threads
+    # as necessary
+    def spawn_watch_thread
+      @watch_thread = Thread.new do
+        while true
+          # If there are idle threads and we're above minimum
+          if @queue.num_waiting > 0 && @worker_threads_count.value > @min_size # documented
+            @killscore += THREADS_IDLE_SCORE * @queue.num_waiting
+
+          # If there are no threads idle and we have room for more
+          elsif(@queue.num_waiting == 0 && @worker_threads_count.value < @max_size) # documented
+            @killscore -= THREADS_BUSY_SCORE * @queue.length
+
+          else
+            # Decay,
+            if(@killscore != 0)
+              @killscore *= 0.9
+            end
+            if(@killscore.abs < 1)
+              @killscore = 0
+            end
+          end
+          if @killscore.abs >= @killthreshold
+            @killscore > 0 ? kill_thread : spawn_thread
+            @killscore = 0
+          end
+          puts "killscore: #{@killscore}. waiting: #{@queue.num_waiting}. threads length: #{@worker_threads_count.value}. min/max: [#{@min_size}, #{@max_size}]" if $DEBUG
+          sleep 0.1
+        end
+      end
+    end
+  end
+end
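To make the kill-score mechanics above concrete: the watch thread ticks every 0.1 seconds; a saturated pool (no idle threads, below :maximum_size) subtracts THREADS_BUSY_SCORE per queued job each tick until the score reaches -kill_threshold and a thread is spawned, while an over-provisioned pool (idle threads, above :initial_size) adds THREADS_IDLE_SCORE per idle thread until +kill_threshold and a thread is culled; otherwise the score decays by 10% per tick. A sketch that makes the growth observable via the thread_count accessor defined above:

  pool = Threadz::ThreadPool.new(:initial_size => 2, :kill_threshold => 5)
  puts pool.thread_count          # => 2

  batch = pool.new_batch
  50.times { batch << lambda { sleep 0.5 } }   # keep every worker busy

  sleep 1                         # let the watch thread notice the backlog
  puts pool.thread_count          # likely > 2 now, capped at :maximum_size (2 * 5 = 10 by default)
  batch.wait_until_done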
data/spec/atomic_integer_spec.rb
ADDED
@@ -0,0 +1,47 @@
+$LOAD_PATH.unshift File.expand_path(File.dirname(__FILE__))
+require 'spec_helper'
+
+describe Threadz do
+  describe Fixnum do
+    it "should perform badly when under heavy thread usage" do
+      # This test should always fail, but there is a small chance it won't...
+
+      i = 0
+      n = 10_000
+      threads = 10
+      t = []
+      threads.times do
+        t << Thread.new do
+          sleep 0.05
+          n.times { i += 1 }
+        end
+        t << Thread.new do
+          sleep 0.05
+          n.times { i -= 1 }
+        end
+      end
+      t.each { |thread| thread.join }
+      i.should_not == 0
+    end
+  end
+  describe Threadz::AtomicInteger do
+    it "should perform better than an int for counting" do
+      i = Threadz::AtomicInteger.new(0)
+      n = 10_000
+      threads = 10
+      t = []
+      threads.times do
+        t << Thread.new do
+          sleep 0.05
+          n.times { i.increment }
+        end
+        t << Thread.new do
+          sleep 0.05
+          n.times { i.decrement }
+        end
+      end
+      t.each { |thread| thread.join }
+      i.value.should == 0
+    end
+  end
+end
data/spec/spec_helper.rb
ADDED
data/spec/threadz_spec.rb
ADDED
@@ -0,0 +1,253 @@
+$LOAD_PATH.unshift File.expand_path(File.dirname(__FILE__))
+require 'spec_helper'
+
+require 'net/http'
+
+describe Threadz do
+  describe Threadz::ThreadPool do
+    before(:each) do
+      @T = Threadz::ThreadPool.new
+    end
+
+    it "should support process" do
+      i = 0
+      3.times do
+        @T.process { i += 1}
+      end
+      sleep 0.1
+
+      i.should == 3
+    end
+
+    it "should support creating batches" do
+      i = 0
+
+      lambda { @T.new_batch }.should_not raise_error
+      lambda { @T.new_batch(:latent => true) }.should_not raise_error
+    end
+
+    it "should perform well for IO jobs" do
+      urls = []
+      urls << "http://www.google.com/" << "http://www.yahoo.com/" << 'http://www.microsoft.com/'
+      urls << "http://www.cnn.com/" << "http://slashdot.org/" << "http://www.mozilla.org/"
+      urls << "http://www.ubuntu.com/" << "http://github.com/"
+      time_single_threaded = Time.now
+
+      begin
+        (urls * 3).each do |url|
+          response = Net::HTTP.get_response(URI.parse(url))
+          body = response.body
+        end
+
+        time_single_threaded = Time.now - time_single_threaded
+
+        time_multi_threaded = Time.now
+        b = @T.new_batch
+        (urls * 3).each do |url|
+          b << Proc.new do
+            response = Net::HTTP.get_response(URI.parse(url))
+            body = response.body
+          end
+        end
+
+        b.wait_until_done
+        time_multi_threaded = Time.now - time_multi_threaded
+
+        time_multi_threaded.should < time_single_threaded
+
+      rescue SocketError
+        pending "pending working internet connection"
+      end
+    end
+
+    describe Threadz::Batch do
+      it "should support jobs" do
+        i = 0
+        b = @T.new_batch
+        10.times do
+          b << lambda { i += 1 }
+          b << Proc.new { i += 1 }
+        end
+        b.wait_until_done
+
+        i.should == 20
+      end
+
+      it "should support arrays of jobs" do
+        i = 0
+        b = @T.new_batch
+        b << [lambda { i += 2}, lambda { i -= 1}]
+        b << [lambda { i += 2}]
+        b << lambda { i += 1 }
+        b.wait_until_done
+
+        i.should == 4
+      end
+
+      it "should support reuse" do
+        i = 0
+        b = @T.new_batch
+        b << [lambda { i += 2}, lambda { i -= 1}, lambda { i -= 2 }]
+        b.wait_until_done
+
+        i.should == -1
+
+        b << [lambda { i += 9}, lambda { i -= 3}, lambda { i -= 4 }]
+        b.wait_until_done
+
+        i.should == 1
+      end
+
+      it "should play nicely with instance variables" do
+        @i = 0
+        b = @T.new_batch
+        b << [lambda { @i += 2}, lambda { @i -= 1}]
+        b << lambda { @i += 2}
+        b.wait_until_done
+
+        @i.should == 3
+      end
+
+      it "should support latent option correctly" do
+        i = 0
+        b = @T.new_batch(:latent => true)
+        b << lambda { i += 1 }
+        b << lambda { i -= 1 }
+        b << [lambda { i += 2}, lambda { i -= 1}]
+
+        i.should == 0
+
+        sleep 0.1
+
+        i.should == 0
+
+        b.start
+        b.wait_until_done
+
+        i.should == 1
+      end
+
+      it "should support waiting with timeouts" do
+        i = 0
+        b = @T.new_batch
+        b << lambda { i += 1 }
+        b << lambda { i -= 1 }
+        b << [lambda { i += 2}, lambda { 500000000.times { i += 1}}]
+        t = Time.now
+        timeout = 0.2
+        b.wait_until_done(:timeout => timeout)
+
+        b.completed?.should be_false
+        (Time.now - t).should >= timeout
+        i.should > 2
+      end
+
+      it "should support 'completed?' even without timeouts" do
+        i = 0
+        b = @T.new_batch
+        b << lambda { i += 1 }
+        b << lambda { i -= 1 }
+        b << [lambda { i += 2}, lambda { sleep 0.01 while i < 10 }]
+
+        b.completed?.should be_false
+
+        sleep 0.1
+
+        b.completed?.should be_false
+
+        i = 10
+        sleep 0.1
+
+        b.completed?.should be_true
+      end
+
+      it "should support 'push'" do
+        i = 0
+        b = @T.new_batch
+        b.push(lambda { i += 1 })
+        b.push([lambda { i += 1 }, lambda { i += 1 }])
+        b.wait_until_done
+
+        i.should == 3
+      end
+
+      it "should support 'when_done'" do
+        i = 0
+        when_done_executed = false
+        b = @T.new_batch(:latent => true)
+
+        100.times { b << lambda { i += 1 } }
+
+        b.when_done { when_done_executed = true }
+
+        when_done_executed.should be_false
+
+        b.start
+
+        sleep(0.1)
+
+        b.completed?.should be_true
+        when_done_executed.should be_true
+      end
+
+      it "should call 'when_done' immediately when batch is already done" do
+        i = 0
+        when_done_executed = false
+        b = @T.new_batch
+
+        Thread.exclusive do
+          100.times { b << lambda { i += 1 } }
+        end
+
+        b.wait_until_done
+
+        b.completed?.should be_true
+
+        b.when_done { when_done_executed = true }
+
+        when_done_executed.should be_true
+      end
+
+      it "should support multiple 'when_done' blocks" do
+        i = 0
+        when_done_executed = 0
+        b = @T.new_batch
+
+        # We're not testing what happens when 'when_done' is called and
+        # the batch is already finished, so wrapping in Thread#exclusive
+        Thread.exclusive do
+          100.times { b << lambda { i += 1 } }
+        end
+
+        3.times { b.when_done { when_done_executed += 1 } }
+
+        sleep(0.1)
+
+        b.completed?.should be_true
+        when_done_executed.should == 3
+      end
+
+      it "shouldn't fail under load" do
+        jobs = 1000
+        times_per_job = 100
+        i = ::Threadz::AtomicInteger.new(0)
+
+        b1 = @T.new_batch(:latent => true)
+        b2 = @T.new_batch(:latent => true)
+
+        jobs.times do
+          b1 << lambda { times_per_job.times { i.increment } }
+          b2 << lambda { times_per_job.times { i.decrement } }
+        end
+
+        b1.start
+        b2.start
+
+        b1.wait_until_done
+        b2.wait_until_done
+
+        i.value.should == 0
+      end
+    end
+  end
+end
data/threadz.gemspec
ADDED
@@ -0,0 +1,56 @@
+# Generated by jeweler
+# DO NOT EDIT THIS FILE DIRECTLY
+# Instead, edit Jeweler::Tasks in Rakefile, and run the gemspec command
+# -*- encoding: utf-8 -*-
+
+Gem::Specification.new do |s|
+  s.name = %q{threadz}
+  s.version = "0.1.0"
+
+  s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
+  s.authors = ["Max Aller"]
+  s.date = %q{2009-12-19}
+  s.email = %q{nanodeath@gmail.com}
+  s.extra_rdoc_files = [
+    "README.rdoc"
+  ]
+  s.files = [
+    ".gitignore",
+    "CHANGELOG",
+    "MIT-LICENSE",
+    "README.rdoc",
+    "Rakefile",
+    "VERSION",
+    "lib/threadz.rb",
+    "lib/threadz/atomic_integer.rb",
+    "lib/threadz/batch.rb",
+    "lib/threadz/directive.rb",
+    "lib/threadz/sleeper.rb",
+    "lib/threadz/thread_pool.rb",
+    "spec/atomic_integer_spec.rb",
+    "spec/spec_helper.rb",
+    "spec/threadz_spec.rb",
+    "threadz.gemspec"
+  ]
+  s.homepage = %q{http://github.com/nanodeath/threadz}
+  s.rdoc_options = ["--charset=UTF-8"]
+  s.require_paths = ["lib"]
+  s.rubygems_version = %q{1.3.5}
+  s.summary = %q{A Ruby threadpool library to handle threadpools and make batch jobs easier.}
+  s.test_files = [
+    "spec/atomic_integer_spec.rb",
+    "spec/threadz_spec.rb",
+    "spec/spec_helper.rb"
+  ]
+
+  if s.respond_to? :specification_version then
+    current_version = Gem::Specification::CURRENT_SPECIFICATION_VERSION
+    s.specification_version = 3
+
+    if Gem::Version.new(Gem::RubyGemsVersion) >= Gem::Version.new('1.2.0') then
+    else
+    end
+  else
+  end
+end
+
metadata
ADDED
@@ -0,0 +1,72 @@
+--- !ruby/object:Gem::Specification
+name: threadz
+version: !ruby/object:Gem::Version
+  version: 0.1.0
+platform: ruby
+authors:
+- Max Aller
+autorequire:
+bindir: bin
+cert_chain: []
+
+date: 2009-12-19 00:00:00 -08:00
+default_executable:
+dependencies: []
+
+description:
+email: nanodeath@gmail.com
+executables: []
+
+extensions: []
+
+extra_rdoc_files:
+- README.rdoc
+files:
+- .gitignore
+- CHANGELOG
+- MIT-LICENSE
+- README.rdoc
+- Rakefile
+- VERSION
+- lib/threadz.rb
+- lib/threadz/atomic_integer.rb
+- lib/threadz/batch.rb
+- lib/threadz/directive.rb
+- lib/threadz/sleeper.rb
+- lib/threadz/thread_pool.rb
+- spec/atomic_integer_spec.rb
+- spec/spec_helper.rb
+- spec/threadz_spec.rb
+- threadz.gemspec
+has_rdoc: true
+homepage: http://github.com/nanodeath/threadz
+licenses: []
+
+post_install_message:
+rdoc_options:
+- --charset=UTF-8
+require_paths:
+- lib
+required_ruby_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: "0"
+  version:
+required_rubygems_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: "0"
+  version:
+requirements: []
+
+rubyforge_project:
+rubygems_version: 1.3.5
+signing_key:
+specification_version: 3
+summary: A Ruby threadpool library to handle threadpools and make batch jobs easier.
+test_files:
+- spec/atomic_integer_spec.rb
+- spec/threadz_spec.rb
+- spec/spec_helper.rb