chantier 0.0.1

checksums.yaml ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA1:
+   metadata.gz: 02886ce2234ab79bd1088a0ef7964d3a52046a94
+   data.tar.gz: e9facf80639b5f4cee600a21356a51f85181c699
+ SHA512:
+   metadata.gz: ee8e5e32959c699e69bb4d33a5d5101843a24427ed0d61db8d73fe4ce34bf769949d0554e61457faa3597aef4408dbccca8ed92c71d2ffaa83bdf5da175eb378
+   data.tar.gz: efe1866c848fa2ebc49a8324b10741725eaea701ad4cdd69aed35bf41852f530473c6e29ce29320466e67cec888e973e698649be983e3bb6bd58b362abb5f5fa
data/.document ADDED
@@ -0,0 +1,5 @@
+ lib/**/*.rb
+ bin/*
+ -
+ features/**/*.feature
+ LICENSE.txt
data/.rspec ADDED
@@ -0,0 +1 @@
+ --color
data/Gemfile ADDED
@@ -0,0 +1,9 @@
+ source "http://rubygems.org"
+
+ group :development do
+   gem "rspec", "~> 2.9"
+   gem "rdoc", "~> 3.12"
+   gem "bundler", "~> 1.0"
+   gem "jeweler", "~> 2.0.1"
+   gem "simplecov", ">= 0"
+ end
data/LICENSE.txt ADDED
@@ -0,0 +1,20 @@
+ Copyright (c) 2014 Julik Tarkhanov
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.rdoc ADDED
@@ -0,0 +1,43 @@
+ = chantier
+
+ Dead-simple task manager for "fire and forget" jobs. Ships with two interchangeable pools:
+ processes and threads.
+
+ The only thing Chantier checks for is that the spun-off tasks have completed. It also limits the number of tasks
+ active at the same time. Your code will block until a slot becomes available for a task.
+
+   manager = Chantier::ProcessPool.new(slots = 4) # You can also use ThreadPool
+   jobs_hose.each_job do | job |
+     manager.fork_task do # this call will block until a slot becomes available
+       Churner.new(job).churn # this block runs in a subprocess
+     end
+     manager.still_running? # => most likely "true"
+   end
+
+   manager.block_until_complete! #=> Will block until all the subprocesses have terminated
+
+ If you have a finite Enumerable at hand you can also launch it into the ProcessPool, like so:
+
+   manager = Chantier::ProcessPool.new(slots = 4)
+
+   manager.map_fork(job_tickets) do | job_ticket |
+     # this block will run in a forked subprocess
+     Churner.new(job_ticket).churn
+     ...
+   end
+
+ == Contributing to chantier
+
+ * Check out the latest master to make sure the feature hasn't been implemented or the bug hasn't been fixed yet.
+ * Check out the issue tracker to make sure someone hasn't already requested it and/or contributed it.
+ * Fork the project.
+ * Start a feature/bugfix branch.
+ * Commit and push until you are happy with your contribution.
+ * Make sure to add tests for it. This is important so I don't break it in a future version unintentionally.
+ * Please try not to mess with the Rakefile, version, or history. If you want to have your own version, or it is otherwise necessary, that is fine, but please isolate the change to its own commit so I can cherry-pick around it.
+
+ == Copyright
+
+ Copyright (c) 2014 Julik Tarkhanov. See LICENSE.txt for
+ further details.
+
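The README's examples lean on a hypothetical Churner worker. As a self-contained illustration of the same API, here is a minimal sketch against a throwaway workload; everything except the Chantier classes themselves is made up for the example:

  require 'chantier'

  # Hypothetical workload: square some numbers in forked children. Each
  # block runs in its own subprocess, so in-memory changes are invisible
  # to the parent - persist results to a file, socket or database.
  pool = Chantier::ProcessPool.new(4)
  (1..10).each do |n|
    pool.fork_task do
      File.write("result-#{n}.txt", (n * n).to_s) # runs in the child
    end
  end
  pool.block_until_complete! # returns once every slot has drained
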
data/Rakefile ADDED
@@ -0,0 +1,53 @@
+ # encoding: utf-8
+
+ require 'rubygems'
+ require 'bundler'
+ require_relative 'lib/chantier'
+
+ begin
+   Bundler.setup(:default, :development)
+ rescue Bundler::BundlerError => e
+   $stderr.puts e.message
+   $stderr.puts "Run `bundle install` to install missing gems"
+   exit e.status_code
+ end
+ require 'rake'
+
+ require 'jeweler'
+ Jeweler::Tasks.new do |gem|
+   # gem is a Gem::Specification... see http://guides.rubygems.org/specification-reference/ for more options
+   gem.version = Chantier::VERSION
+   gem.name = "chantier"
+   gem.homepage = "http://github.com/julik/chantier"
+   gem.license = "MIT"
+   gem.summary = %Q{ Dead-simple worker table based multiprocessing/multithreading }
+   gem.description = %Q{ Process your jobs in parallel with a simple table of processes or threads }
+   gem.email = "me@julik.nl"
+   gem.authors = ["Julik Tarkhanov"]
+   # dependencies defined in Gemfile
+ end
+ Jeweler::RubygemsDotOrgTasks.new
+
+ require 'rspec/core'
+ require 'rspec/core/rake_task'
+ RSpec::Core::RakeTask.new(:spec) do |spec|
+   spec.pattern = FileList['spec/**/*_spec.rb']
+ end
+
+ desc "Code coverage detail"
+ task :simplecov do
+   ENV['COVERAGE'] = "true"
+   Rake::Task['spec'].execute
+ end
+
+ task :default => :spec
+
+ require 'rdoc/task'
+ Rake::RDocTask.new do |rdoc|
+   version = File.exist?('VERSION') ? File.read('VERSION') : ""
+
+   rdoc.rdoc_dir = 'rdoc'
+   rdoc.title = "chantier #{version}"
+   rdoc.rdoc_files.include('README*')
+   rdoc.rdoc_files.include('lib/**/*.rb')
+ end
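How the tasks above are typically invoked is not spelled out in the Rakefile itself; a hedged sketch, assuming Bundler and the development gems from the Gemfile are installed:

  # Run from the project root:
  #
  #   bundle exec rake            # default task, i.e. the RSpec suite
  #   bundle exec rake simplecov  # same suite with COVERAGE=true, which
  #                               # makes spec_helper start SimpleCov
  #   bundle exec rake rdoc       # builds the API docs into ./rdoc
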
data/lib/chantier.rb ADDED
@@ -0,0 +1,5 @@
+ module Chantier
+   VERSION = '0.0.1'
+   require_relative 'process_pool'
+   require_relative 'thread_pool'
+ end
data/lib/process_pool.rb ADDED
@@ -0,0 +1,128 @@
+ # Allows you to spin off a pool of subprocesses that is never larger than X
+ # and maintains that pool for you. You can then enqueue tasks to be executed
+ # within the pool. When all slots are full, the caller will be blocked until a slot becomes
+ # available.
+ #
+ #   manager = ProcessPool.new(slots = 4)
+ #   jobs_hose.each_job do | job |
+ #     # this call will block until a slot becomes available
+ #     manager.fork_task do # this block runs in a subprocess
+ #       Churner.new(job).churn
+ #     end
+ #     manager.still_running? # => most likely "true"
+ #   end
+ #
+ #   manager.block_until_complete! #=> Will block until all the subprocesses have terminated
+ #
+ # If you have a finite Enumerable at hand you can also launch it into the ProcessPool, like so:
+ #
+ #   manager = ProcessPool.new(slots = 4)
+ #
+ #   manager.map_fork(job_tickets) do | job_ticket |
+ #     # this block will run in a forked subprocess
+ #     Churner.new(job_ticket).churn
+ #     ...
+ #   end
+ #
+ # Could be rewritten to use threads instead if running on JVM/Rubinius turns out to be feasible.
+ class Chantier::ProcessPool
+   # Kill the spawned processes after at most X seconds
+   KILL_AFTER_SECONDS = 60 * 2
+
+   # http://linuxman.wikispaces.com/killing+me+softly
+   TERMINATION_SIGNALS = %w( TERM HUP INT QUIT PIPE KILL )
+
+   # The manager uses loops in a few places. By doing a little sleep()
+   # in those loops we can yield process control back to the OS, which brings
+   # the CPU usage of the managing process down to small numbers. If you just do
+   # a loop {}, MRI will saturate a whole core and not let go of it until
+   # the loop returns.
+   SCHEDULER_SLEEP_SECONDS = 0.05
+
+   def initialize(num_procs)
+     raise "Need at least 1 slot, given #{num_procs.to_i}" unless num_procs.to_i > 0
+     @pids = [nil] * num_procs.to_i
+     @semaphore = Mutex.new
+   end
+
+   # Distributes the elements in the given Enumerable to parallel workers,
+   # N workers at a time. The method will return once all the workers for all
+   # the elements of the Enumerable have terminated.
+   #
+   #   pool = ProcessPool.new(5)
+   #   pool.map_fork(array_of_urls) do | single_url |
+   #     Faraday.get(single_url).response ...
+   #     ...
+   #     ...
+   #   end
+   def map_fork(arguments_per_job, &blk)
+     arguments_per_job.each do | single_block_argument |
+       fork_task { yield(single_block_argument) }
+     end
+     block_until_complete!
+   end
+
+   # Run the given block in a forked subprocess. This method will block
+   # the thread it is called from until a slot in the process table
+   # becomes free. Once that happens, the given block will be forked off
+   # and the method will return.
+   def fork_task(&blk)
+     destination_slot_idx = nil
+
+     # Try to find a slot in the process table where this job can go
+     catch :_found do
+       loop do
+         @semaphore.synchronize do
+           if (destination_slot_idx = @pids.index(nil))
+             @pids[destination_slot_idx] = true # occupy it
+             throw :_found
+           end
+         end
+         sleep SCHEDULER_SLEEP_SECONDS # Breathing room
+       end
+     end
+
+     task_pid = fork(&blk)
+
+     # No need to lock this because we already reserved that slot
+     @pids[destination_slot_idx] = task_pid
+
+     puts("Spun off a task process #{task_pid} into slot #{destination_slot_idx}") if $VERBOSE
+
+     # Dispatch the watcher thread that will record in the process table
+     # that the process has quit
+     Thread.new do
+       Process.wait(task_pid) # This call will block until that process quits
+       # Now we can remove that process from the process table
+       @semaphore.synchronize { @pids[destination_slot_idx] = nil }
+     end
+
+     # Dispatch the killer thread which kicks in after KILL_AFTER_SECONDS.
+     # Note that we do not manage the @pids table here because once the process
+     # gets terminated it will bounce back to the standard wait() above.
+     Thread.new do
+       sleep KILL_AFTER_SECONDS
+       begin
+         TERMINATION_SIGNALS.each do | sig |
+           Process.kill(sig, task_pid)
+           sleep 5 # Give it some time to react
+         end
+       rescue Errno::ESRCH
+         # It has already quit, nothing to do
+       end
+     end
+   end
+
+   # Tells whether some processes are still churning
+   def still_running?
+     @pids.any?{|e| e }
+   end
+
+   # Analogous to Process.wait or wait_all - will block until all of the process slots have been freed.
+   def block_until_complete!
+     loop do
+       return unless still_running?
+       sleep SCHEDULER_SLEEP_SECONDS # Breathing room
+     end
+   end
+ end
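The killer thread above walks TERMINATION_SIGNALS from gentlest to harshest with a pause between each. A standalone sketch of that escalation pattern, assuming a hypothetical long-running child process (the early break on ESRCH is an addition for illustration, not part of the class above):

  # Spawn a stubborn child, then escalate signals until it is gone.
  child_pid = Process.fork { sleep }

  %w( TERM HUP INT QUIT PIPE KILL ).each do |sig|
    begin
      Process.kill(sig, child_pid)
    rescue Errno::ESRCH
      break # already gone and reaped - nothing left to signal
    end
    sleep 0.5 # give the child a moment to react before escalating
  end
  Process.wait(child_pid) rescue nil # reap the child if it became a zombie
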
data/lib/thread_pool.rb ADDED
@@ -0,0 +1,101 @@
+ # Allows you to spin off a pool of Threads that is never larger than X.
+ # You can then enqueue tasks to be executed within that pool.
+ # When all slots are full, the caller will be blocked until a slot becomes
+ # available.
+ #
+ #   manager = ThreadPool.new(slots = 4)
+ #   jobs_hose.each_job do | job |
+ #     # this call will block until a slot becomes available
+ #     manager.fork_task do # this block runs in a thread
+ #       Churner.new(job).churn
+ #     end
+ #     manager.still_running? # => most likely "true"
+ #   end
+ #
+ #   manager.block_until_complete! #=> Will block until all the threads have terminated
+ #
+ # If you have a finite Enumerable at hand you can also launch it into the ThreadPool, like so:
+ #
+ #   manager = ThreadPool.new(slots = 4)
+ #
+ #   manager.map_fork(job_tickets) do | job_ticket |
+ #     # this block will run in a thread
+ #     Churner.new(job_ticket).churn
+ #     ...
+ #   end
+ class Chantier::ThreadPool
+
+   # The manager uses loops in a few places. By doing a little sleep()
+   # in those loops we can yield process control back to the OS, which brings
+   # the CPU usage of the managing process down to small numbers. If you just do
+   # a loop {}, MRI will saturate a whole core and not let go of it until
+   # the loop returns.
+   SCHEDULER_SLEEP_SECONDS = 0.05
+
+   def initialize(num_threads)
+     raise "Need at least 1 slot, given #{num_threads.to_i}" unless num_threads.to_i > 0
+     @threads = [nil] * num_threads.to_i
+     @semaphore = Mutex.new
+   end
+
+   # Distributes the elements in the given Enumerable to parallel workers,
+   # N workers at a time. The method will return once all the workers for all
+   # the elements of the Enumerable have terminated.
+   #
+   #   pool = ThreadPool.new(5)
+   #   pool.map_fork(array_of_urls) do | single_url |
+   #     Faraday.get(single_url).response ...
+   #     ...
+   #     ...
+   #   end
+   def map_fork(arguments_per_job, &blk)
+     arguments_per_job.each do | single_block_argument |
+       fork_task { yield(single_block_argument) }
+     end
+     block_until_complete!
+   end
+
+   # Run the given block in a thread. This method will block
+   # the thread it is called from until a slot in the thread table
+   # becomes free.
+   def fork_task(&blk)
+     destination_slot_idx = nil
+
+     # Try to find a slot in the thread table where this job can go
+     catch :_found do
+       loop do
+         @semaphore.synchronize do
+           if (destination_slot_idx = @threads.index(nil))
+             @threads[destination_slot_idx] = true # occupy it
+             throw :_found
+           end
+         end
+         sleep SCHEDULER_SLEEP_SECONDS # Breathing room
+       end
+     end
+
+     # No need to lock this because we already reserved that slot
+     @threads[destination_slot_idx] = Thread.new do
+       begin
+         yield
+       ensure
+         # Free the slot even if the block raises, so the pool
+         # does not leak slots on task failures
+         @semaphore.synchronize { @threads[destination_slot_idx] = nil }
+       end
+     end
+   end
+
+   # Tells whether some threads are still churning
+   def still_running?
+     @threads.any?{|e| e && e.respond_to?(:alive?) && e.alive? }
+   end
+
+   # Analogous to Process.wait or wait_all - will block until all of the thread slots have been freed.
+   def block_until_complete!
+     @threads.map do |e|
+       if e.respond_to?(:join) && e.alive?
+         e.join
+       end
+     end
+   end
+ end
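Unlike the ProcessPool, the ThreadPool shares memory with the caller, so task results can be collected in-process. A minimal sketch of that, assuming the work is I/O-bound enough for MRI threads to overlap usefully; the Queue-based collection is an illustration, not part of the library:

  require 'thread' # Queue, on older Rubies
  require 'chantier'

  results = Queue.new # thread-safe collection point
  pool = Chantier::ThreadPool.new(3)

  %w( a b c d e ).each do |item|
    pool.fork_task do
      sleep 0.1              # stand-in for blocking I/O
      results << item.upcase # same process, so visible to the caller
    end
  end
  pool.block_until_complete!

  collected = []
  collected << results.pop until results.empty?
  puts collected.inspect # => something like ["A", "C", "B", "E", "D"]
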
data/spec/process_pool_spec.rb ADDED
@@ -0,0 +1,111 @@
+ require File.expand_path(File.dirname(__FILE__) + '/spec_helper')
+
+ describe Chantier::ProcessPool do
+
+   before(:each) do
+     @files = (0...20).map do
+       SecureRandom.hex(12).tap { |filename| FileUtils.touch(filename) }
+     end
+   end
+
+   after(:each) do
+     @files.map(&File.method(:unlink))
+   end
+
+   context '#map_fork' do
+     let(:manager) { described_class.new(5) }
+
+     it 'processes multiple files' do
+       expect(manager).not_to be_still_running
+
+       manager.map_fork(@files) do | filename |
+         sleep(0.05 + (rand / 10))
+         File.open(filename, "wb"){|f| f.write("Worker completed for #{filename}") }
+       end
+
+       expect(manager).not_to be_still_running
+
+       @files.each do | filename |
+         expect(File.read(filename)).to eq("Worker completed for #{filename}")
+       end
+     end
+   end
+
+   context 'with 0 concurrent slots' do
+     it 'raises an exception' do
+       expect {
+         Chantier::ProcessPool.new(0)
+       }.to raise_error(RuntimeError, 'Need at least 1 slot, given 0')
+
+       expect {
+         Chantier::ProcessPool.new(-1)
+       }.to raise_error(RuntimeError, 'Need at least 1 slot, given -1')
+     end
+   end
+
+   it 'gets instantiated with the given number of slots' do
+     Chantier::ProcessPool.new(10)
+   end
+
+   context 'with 1 slot' do
+     let(:manager) { described_class.new(1) }
+
+     it 'processes 1 file' do
+       filename = @files[0]
+       manager.fork_task do
+         sleep(0.05 + (rand / 10))
+         File.open(filename, "wb"){|f| f.write("Worker completed") }
+       end
+       manager.block_until_complete!
+
+       expect(File.read(filename)).to eq('Worker completed')
+     end
+
+     it 'processes multiple files' do
+       expect(manager).not_to be_still_running
+
+       @files.each do | filename |
+         manager.fork_task do
+           sleep(0.05 + (rand / 10))
+           File.open(filename, "wb"){|f| f.write("Worker completed for #{filename}") }
+         end
+       end
+
+       expect(manager).to be_still_running
+
+       manager.block_until_complete!
+
+       @files.each do | filename |
+         expect(File.read(filename)).to eq("Worker completed for #{filename}")
+       end
+     end
+   end
+
+   context 'with 5 slots' do
+     let(:manager) { described_class.new(5) }
+
+     it 'processes multiple files' do
+       expect(manager).not_to be_still_running
+
+       @files.each do | filename |
+         manager.fork_task do
+           sleep(0.05 + (rand / 10))
+           File.open(filename, "wb"){|f| f.write("Worker completed for #{filename}") }
+         end
+       end
+
+       expect(manager).to be_still_running
+
+       manager.block_until_complete!
+
+       @files.each do | filename |
+         expect(File.read(filename)).to eq("Worker completed for #{filename}")
+       end
+     end
+   end
+ end
+
data/spec/spec_helper.rb ADDED
@@ -0,0 +1,29 @@
+ require 'simplecov'
+
+ module SimpleCov::Configuration
+   def clean_filters
+     @filters = []
+   end
+ end
+
+ SimpleCov.configure do
+   clean_filters
+   # load_adapter 'test_frameworks'
+ end
+
+ ENV["COVERAGE"] && SimpleCov.start do
+   add_filter "/.rvm/"
+ end
+ $LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
+ $LOAD_PATH.unshift(File.dirname(__FILE__))
+
+ require 'rspec'
+ require 'securerandom' # used by the specs to generate scratch filenames
+ require 'fileutils'    # used by the specs to touch/remove scratch files
+ require 'chantier'
+
+ # Requires supporting files with custom matchers and macros, etc,
+ # in ./support/ and its subdirectories.
+ Dir["#{File.dirname(__FILE__)}/support/**/*.rb"].each {|f| require f}
+
+ RSpec.configure do |config|
+
+ end
data/spec/thread_pool_spec.rb ADDED
@@ -0,0 +1,111 @@
+ require File.expand_path(File.dirname(__FILE__) + '/spec_helper')
+
+ describe Chantier::ThreadPool do
+
+   before(:each) do
+     @files = (0...20).map do
+       SecureRandom.hex(12).tap { |filename| FileUtils.touch(filename) }
+     end
+   end
+
+   after(:each) do
+     @files.map(&File.method(:unlink))
+   end
+
+   context '#map_fork' do
+     let(:manager) { described_class.new(5) }
+
+     it 'processes multiple files' do
+       expect(manager).not_to be_still_running
+
+       manager.map_fork(@files) do | filename |
+         sleep(0.05 + (rand / 10))
+         File.open(filename, "wb"){|f| f.write("Worker completed for #{filename}") }
+       end
+
+       expect(manager).not_to be_still_running
+
+       @files.each do | filename |
+         expect(File.read(filename)).to eq("Worker completed for #{filename}")
+       end
+     end
+   end
+
+   context 'with 0 concurrent slots' do
+     it 'raises an exception' do
+       expect {
+         described_class.new(0)
+       }.to raise_error(RuntimeError, 'Need at least 1 slot, given 0')
+
+       expect {
+         described_class.new(-1)
+       }.to raise_error(RuntimeError, 'Need at least 1 slot, given -1')
+     end
+   end
+
+   it 'gets instantiated with the given number of slots' do
+     described_class.new(10)
+   end
+
+   context 'with 1 slot' do
+     let(:manager) { described_class.new(1) }
+
+     it 'processes 1 file' do
+       filename = @files[0]
+       manager.fork_task do
+         sleep(0.05 + (rand / 10))
+         File.open(filename, "wb"){|f| f.write("Worker completed") }
+       end
+       manager.block_until_complete!
+
+       expect(File.read(filename)).to eq('Worker completed')
+     end
+
+     it 'processes multiple files' do
+       expect(manager).not_to be_still_running
+
+       @files.each do | filename |
+         manager.fork_task do
+           sleep(0.05 + (rand / 10))
+           File.open(filename, "wb"){|f| f.write("Worker completed for #{filename}") }
+         end
+       end
+
+       expect(manager).to be_still_running
+
+       manager.block_until_complete!
+
+       @files.each do | filename |
+         expect(File.read(filename)).to eq("Worker completed for #{filename}")
+       end
+     end
+   end
+
+   context 'with 5 slots' do
+     let(:manager) { described_class.new(5) }
+
+     it 'processes multiple files' do
+       expect(manager).not_to be_still_running
+
+       @files.each do | filename |
+         manager.fork_task do
+           sleep(0.05 + (rand / 10))
+           File.open(filename, "wb"){|f| f.write("Worker completed for #{filename}") }
+         end
+       end
+
+       expect(manager).to be_still_running
+
+       manager.block_until_complete!
+
+       @files.each do | filename |
+         expect(File.read(filename)).to eq("Worker completed for #{filename}")
+       end
+     end
+   end
+ end
+
metadata ADDED
@@ -0,0 +1,127 @@
+ --- !ruby/object:Gem::Specification
+ name: chantier
+ version: !ruby/object:Gem::Version
+   version: 0.0.1
+ platform: ruby
+ authors:
+ - Julik Tarkhanov
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2014-07-13 00:00:00.000000000 Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: rspec
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: '2.9'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: '2.9'
+ - !ruby/object:Gem::Dependency
+   name: rdoc
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: '3.12'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: '3.12'
+ - !ruby/object:Gem::Dependency
+   name: bundler
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: '1.0'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: '1.0'
+ - !ruby/object:Gem::Dependency
+   name: jeweler
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: 2.0.1
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: 2.0.1
+ - !ruby/object:Gem::Dependency
+   name: simplecov
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+ description: ' Process your jobs in parallel with a simple table of processes or threads '
+ email: me@julik.nl
+ executables: []
+ extensions: []
+ extra_rdoc_files:
+ - LICENSE.txt
+ - README.rdoc
+ files:
+ - .document
+ - .rspec
+ - Gemfile
+ - LICENSE.txt
+ - README.rdoc
+ - Rakefile
+ - lib/chantier.rb
+ - lib/process_pool.rb
+ - lib/thread_pool.rb
+ - spec/process_pool_spec.rb
+ - spec/spec_helper.rb
+ - spec/thread_pool_spec.rb
+ homepage: http://github.com/julik/chantier
+ licenses:
+ - MIT
+ metadata: {}
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubyforge_project:
+ rubygems_version: 2.0.3
+ signing_key:
+ specification_version: 4
+ summary: Dead-simple worker table based multiprocessing/multithreading
+ test_files: []