concurrent-ruby 0.5.0 → 0.6.0.pre.1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (90) hide show
  1. checksums.yaml +4 -4
  2. data/README.md +88 -77
  3. data/lib/concurrent.rb +17 -2
  4. data/lib/concurrent/actor.rb +17 -0
  5. data/lib/concurrent/actor_context.rb +31 -0
  6. data/lib/concurrent/actor_ref.rb +39 -0
  7. data/lib/concurrent/agent.rb +12 -3
  8. data/lib/concurrent/async.rb +290 -0
  9. data/lib/concurrent/atomic.rb +5 -9
  10. data/lib/concurrent/cached_thread_pool.rb +39 -137
  11. data/lib/concurrent/channel/blocking_ring_buffer.rb +60 -0
  12. data/lib/concurrent/channel/buffered_channel.rb +83 -0
  13. data/lib/concurrent/channel/channel.rb +11 -0
  14. data/lib/concurrent/channel/probe.rb +19 -0
  15. data/lib/concurrent/channel/ring_buffer.rb +54 -0
  16. data/lib/concurrent/channel/unbuffered_channel.rb +34 -0
  17. data/lib/concurrent/channel/waitable_list.rb +38 -0
  18. data/lib/concurrent/configuration.rb +92 -0
  19. data/lib/concurrent/dataflow.rb +9 -3
  20. data/lib/concurrent/delay.rb +88 -0
  21. data/lib/concurrent/exchanger.rb +31 -0
  22. data/lib/concurrent/fixed_thread_pool.rb +28 -122
  23. data/lib/concurrent/future.rb +10 -5
  24. data/lib/concurrent/immediate_executor.rb +3 -2
  25. data/lib/concurrent/ivar.rb +2 -1
  26. data/lib/concurrent/java_cached_thread_pool.rb +45 -0
  27. data/lib/concurrent/java_fixed_thread_pool.rb +37 -0
  28. data/lib/concurrent/java_thread_pool_executor.rb +194 -0
  29. data/lib/concurrent/per_thread_executor.rb +23 -0
  30. data/lib/concurrent/postable.rb +2 -0
  31. data/lib/concurrent/processor_count.rb +125 -0
  32. data/lib/concurrent/promise.rb +42 -18
  33. data/lib/concurrent/ruby_cached_thread_pool.rb +37 -0
  34. data/lib/concurrent/ruby_fixed_thread_pool.rb +31 -0
  35. data/lib/concurrent/ruby_thread_pool_executor.rb +268 -0
  36. data/lib/concurrent/ruby_thread_pool_worker.rb +69 -0
  37. data/lib/concurrent/simple_actor_ref.rb +124 -0
  38. data/lib/concurrent/thread_local_var.rb +1 -1
  39. data/lib/concurrent/thread_pool_executor.rb +30 -0
  40. data/lib/concurrent/timer_task.rb +13 -10
  41. data/lib/concurrent/tvar.rb +212 -0
  42. data/lib/concurrent/utilities.rb +1 -0
  43. data/lib/concurrent/version.rb +1 -1
  44. data/spec/concurrent/actor_context_spec.rb +37 -0
  45. data/spec/concurrent/actor_ref_shared.rb +313 -0
  46. data/spec/concurrent/actor_spec.rb +9 -1
  47. data/spec/concurrent/agent_spec.rb +97 -96
  48. data/spec/concurrent/async_spec.rb +320 -0
  49. data/spec/concurrent/cached_thread_pool_shared.rb +137 -0
  50. data/spec/concurrent/channel/blocking_ring_buffer_spec.rb +149 -0
  51. data/spec/concurrent/channel/buffered_channel_spec.rb +151 -0
  52. data/spec/concurrent/channel/channel_spec.rb +37 -0
  53. data/spec/concurrent/channel/probe_spec.rb +49 -0
  54. data/spec/concurrent/channel/ring_buffer_spec.rb +126 -0
  55. data/spec/concurrent/channel/unbuffered_channel_spec.rb +132 -0
  56. data/spec/concurrent/configuration_spec.rb +134 -0
  57. data/spec/concurrent/dataflow_spec.rb +109 -27
  58. data/spec/concurrent/delay_spec.rb +77 -0
  59. data/spec/concurrent/exchanger_spec.rb +66 -0
  60. data/spec/concurrent/fixed_thread_pool_shared.rb +136 -0
  61. data/spec/concurrent/future_spec.rb +60 -51
  62. data/spec/concurrent/global_thread_pool_shared.rb +33 -0
  63. data/spec/concurrent/immediate_executor_spec.rb +4 -25
  64. data/spec/concurrent/ivar_spec.rb +36 -23
  65. data/spec/concurrent/java_cached_thread_pool_spec.rb +64 -0
  66. data/spec/concurrent/java_fixed_thread_pool_spec.rb +64 -0
  67. data/spec/concurrent/java_thread_pool_executor_spec.rb +71 -0
  68. data/spec/concurrent/obligation_shared.rb +32 -20
  69. data/spec/concurrent/{global_thread_pool_spec.rb → per_thread_executor_spec.rb} +9 -13
  70. data/spec/concurrent/processor_count_spec.rb +20 -0
  71. data/spec/concurrent/promise_spec.rb +29 -41
  72. data/spec/concurrent/ruby_cached_thread_pool_spec.rb +69 -0
  73. data/spec/concurrent/ruby_fixed_thread_pool_spec.rb +39 -0
  74. data/spec/concurrent/ruby_thread_pool_executor_spec.rb +183 -0
  75. data/spec/concurrent/simple_actor_ref_spec.rb +219 -0
  76. data/spec/concurrent/thread_pool_class_cast_spec.rb +40 -0
  77. data/spec/concurrent/thread_pool_executor_shared.rb +155 -0
  78. data/spec/concurrent/thread_pool_shared.rb +98 -36
  79. data/spec/concurrent/tvar_spec.rb +137 -0
  80. data/spec/spec_helper.rb +4 -0
  81. data/spec/support/functions.rb +4 -0
  82. metadata +85 -20
  83. data/lib/concurrent/cached_thread_pool/worker.rb +0 -91
  84. data/lib/concurrent/channel.rb +0 -63
  85. data/lib/concurrent/fixed_thread_pool/worker.rb +0 -54
  86. data/lib/concurrent/global_thread_pool.rb +0 -42
  87. data/spec/concurrent/cached_thread_pool_spec.rb +0 -101
  88. data/spec/concurrent/channel_spec.rb +0 -86
  89. data/spec/concurrent/fixed_thread_pool_spec.rb +0 -92
  90. data/spec/concurrent/uses_global_thread_pool_shared.rb +0 -64
module Concurrent

  # An executor which spawns a brand-new thread for every task posted to it.
  # Threads are never pooled or reused; each task runs (and dies) on its own
  # thread. Useful as a drop-in executor when pooling is undesirable.
  class PerThreadExecutor

    # Run the given block asynchronously on a newly created thread.
    #
    # @param [Array] args zero or more arguments passed through to the block
    #
    # @yield the task to perform on the new thread
    #
    # @return [Boolean] always +true+ once the thread has been spawned
    #
    # @raise [ArgumentError] if no block is given
    def self.post(*args)
      raise ArgumentError, 'no block given' unless block_given?
      Thread.new(*args) do
        # a failing task must not take the process down
        Thread.current.abort_on_exception = false
        yield(*args)
      end
      true
    end

    # Instance-level convenience wrapper around {PerThreadExecutor.post}.
    #
    # @return [Boolean] always +true+
    def post(*args, &block)
      PerThreadExecutor.post(*args, &block)
    end

    # Submit a task via the shovel operator.
    #
    # @param [Proc] block the task to perform on a new thread
    #
    # @return [self] returns itself to allow chaining
    def <<(block)
      PerThreadExecutor.post(&block)
      self
    end
  end
end
@@ -75,6 +75,7 @@ module Concurrent
75
75
  end
76
76
  end
77
77
 
78
+ # @deprecated +Actor+ is being replaced with a completely new framework prior to v1.0.0
78
79
  def forward(receiver, *message)
79
80
  raise ArgumentError.new('empty message') if message.empty?
80
81
  return false unless ready?
@@ -82,6 +83,7 @@ module Concurrent
82
83
  queue.length
83
84
  end
84
85
 
86
+ # @deprecated +Actor+ is being replaced with a completely new framework prior to v1.0.0
85
87
  def ready?
86
88
  if self.respond_to?(:running?) && ! running?
87
89
  false
require 'rbconfig'

module Concurrent

  # Number of processors seen by the OS and used for process scheduling. For performance
  # reasons the calculated value will be memoized on the first call.
  #
  # When running under JRuby the Java runtime call +java.lang.Runtime.getRuntime.availableProcessors+
  # will be used. According to the Java documentation this "value may change
  # during a particular invocation of the virtual machine... [applications]
  # should therefore occasionally poll this property." Subsequently the result
  # will NOT be memoized under JRuby.
  #
  # On Windows the Win32 API will be queried for the `NumberOfLogicalProcessors from Win32_Processor`.
  # This will return the total number of "logical processors for the current instance of the processor",
  # which takes into account hyperthreading.
  #
  # * AIX: /usr/sbin/pmcycles (AIX 5+), /usr/sbin/lsdev
  # * BSD: /sbin/sysctl
  # * Cygwin: /proc/cpuinfo
  # * Darwin: /usr/bin/hwprefs, /usr/sbin/sysctl
  # * HP-UX: /usr/sbin/ioscan
  # * IRIX: /usr/sbin/sysconf
  # * Linux: /proc/cpuinfo
  # * Minix 3+: /proc/cpuinfo
  # * Solaris: /usr/sbin/psrinfo
  # * Tru64 UNIX: /usr/sbin/psrinfo
  # * UnixWare: /usr/sbin/psrinfo
  #
  # If every probe fails, or any probe raises, the count defaults to 1.
  #
  # @return [Integer] number of processors seen by the OS or Java runtime
  #
  # @see https://github.com/grosser/parallel/blob/4fc8b89d08c7091fe0419ca8fba1ec3ce5a8d185/lib/parallel.rb
  #
  # @see http://docs.oracle.com/javase/6/docs/api/java/lang/Runtime.html#availableProcessors()
  # @see http://msdn.microsoft.com/en-us/library/aa394373(v=vs.85).aspx
  def processor_count
    if RUBY_PLATFORM == 'java'
      java.lang.Runtime.getRuntime.availableProcessors
    else
      # memoized on MRI and friends; the OS probes below are expensive
      @@processor_count ||= begin
        os_name = RbConfig::CONFIG["target_os"]
        if os_name =~ /mingw|mswin/
          require 'win32ole'
          result = WIN32OLE.connect("winmgmts://").ExecQuery(
            "select NumberOfLogicalProcessors from Win32_Processor")
          result.to_enum.collect(&:NumberOfLogicalProcessors).reduce(:+)
        elsif File.readable?("/proc/cpuinfo")
          IO.read("/proc/cpuinfo").scan(/^processor/).size
        elsif File.executable?("/usr/bin/hwprefs")
          IO.popen("/usr/bin/hwprefs thread_count").read.to_i
        elsif File.executable?("/usr/sbin/psrinfo")
          IO.popen("/usr/sbin/psrinfo").read.scan(/^.*on-*line/).size
        elsif File.executable?("/usr/sbin/ioscan")
          IO.popen("/usr/sbin/ioscan -kC processor") do |out|
            out.read.scan(/^.*processor/).size
          end
        elsif File.executable?("/usr/sbin/pmcycles")
          IO.popen("/usr/sbin/pmcycles -m").read.count("\n")
        elsif File.executable?("/usr/sbin/lsdev")
          IO.popen("/usr/sbin/lsdev -Cc processor -S 1").read.count("\n")
        elsif File.executable?("/usr/sbin/sysconf") and os_name =~ /irix/i
          IO.popen("/usr/sbin/sysconf NPROC_ONLN").read.to_i
        elsif File.executable?("/usr/sbin/sysctl")
          IO.popen("/usr/sbin/sysctl -n hw.ncpu").read.to_i
        elsif File.executable?("/sbin/sysctl")
          IO.popen("/sbin/sysctl -n hw.ncpu").read.to_i
        else
          1
        end
      end
    end
  rescue
    return 1
  end
  module_function :processor_count

  # Number of physical processor cores on the current system. For performance reasons
  # the calculated value will be memoized on the first call.
  #
  # On Windows the Win32 API will be queried for the `NumberOfCores from Win32_Processor`.
  # This will return the total number "of cores for the current instance of the processor."
  # On Unix-like operating systems either the `hwprefs` or `sysctl` utility will be called
  # in a subshell and the returned value will be used. In the rare case where none of these
  # methods work or an exception is raised the function will simply return 1.
  #
  # @return [Integer] number of physical processor cores on the current system
  #
  # @see https://github.com/grosser/parallel/blob/4fc8b89d08c7091fe0419ca8fba1ec3ce5a8d185/lib/parallel.rb
  #
  # @see http://msdn.microsoft.com/en-us/library/aa394373(v=vs.85).aspx
  # @see http://www.unix.com/man-page/osx/1/HWPREFS/
  # @see http://linux.die.net/man/8/sysctl
  def physical_processor_count
    @@physical_processor_count ||= begin
      ppc = case RbConfig::CONFIG["target_os"]
            when /darwin1/
              IO.popen("/usr/sbin/sysctl -n hw.physicalcpu").read.to_i
            when /linux/
              cores = {} # unique physical ID / core ID combinations
              # NOTE(review): assumes each "core id" line is preceded by a
              # "physical id" line (true for real /proc/cpuinfo); otherwise
              # `phy` is still the Integer 0 and the concat below raises,
              # which the rescue maps to 1 — confirm if that matters.
              phy = 0
              IO.read("/proc/cpuinfo").scan(/^physical id.*|^core id.*/) do |ln|
                if ln.start_with?("physical")
                  phy = ln[/\d+/]
                elsif ln.start_with?("core")
                  cid = phy + ":" + ln[/\d+/]
                  cores[cid] = true if not cores[cid]
                end
              end
              cores.count
            when /mswin|mingw/
              require 'win32ole'
              result_set = WIN32OLE.connect("winmgmts://").ExecQuery(
                "select NumberOfCores from Win32_Processor")
              result_set.to_enum.collect(&:NumberOfCores).reduce(:+)
            else
              processor_count
            end
      # fall back to logical count if physical info is invalid
      ppc > 0 ? ppc : processor_count
    end
  rescue
    return 1
  end
  module_function :physical_processor_count
end
@@ -1,22 +1,43 @@
1
1
  require 'thread'
2
2
 
3
- require 'concurrent/global_thread_pool'
3
+ require 'concurrent/configuration'
4
4
  require 'concurrent/obligation'
5
5
 
6
6
  module Concurrent
7
7
 
8
8
  class Promise
9
9
  include Obligation
10
- include UsesGlobalThreadPool
11
-
10
+ include OptionsParser
11
+
12
+ # Initialize a new Promise with the provided options.
13
+ #
14
+ # @yield [result] the block operation to be performed asynchronously
15
+ # @param [Hash] opts the options used to define the behavior at update and deref
16
+ #
17
+ # @option opts [Promise] :parent the parent +Promise+ when building a chain/tree
18
+ # @option opts [Proc] :on_fulfill fulfillment handler
19
+ # @option opts [Proc] :on_reject rejection handler
20
+ #
21
+ # @option opts [Boolean] :operation (false) when +true+ will execute the future on the global
22
+ # operation pool (for long-running operations), when +false+ will execute the future on the
23
+ # global task pool (for short-running tasks)
24
+ # @option opts [object] :executor when provided will run all operations on
25
+ # this executor rather than the global thread pool (overrides :operation)
26
+ #
27
+ # @option opts [Boolean] :dup_on_deref (false) call +#dup+ before returning the data
28
+ # @option opts [Boolean] :freeze_on_deref (false) call +#freeze+ before returning the data
29
+ # @option opts [Proc] :copy_on_deref (nil) call the given +Proc+ passing the internal value and
30
+ # returning the value returned from the proc
31
+ #
12
32
  # @see http://wiki.commonjs.org/wiki/Promises/A
13
33
  # @see http://promises-aplus.github.io/promises-spec/
14
- def initialize(options = {}, &block)
15
- options.delete_if {|k, v| v.nil?}
34
+ def initialize(opts = {}, &block)
35
+ opts.delete_if {|k, v| v.nil?}
16
36
 
17
- @parent = options.fetch(:parent) { nil }
18
- @on_fulfill = options.fetch(:on_fulfill) { Proc.new{ |result| result } }
19
- @on_reject = options.fetch(:on_reject) { Proc.new{ |reason| raise reason } }
37
+ @executor = get_executor_from(opts)
38
+ @parent = opts.fetch(:parent) { nil }
39
+ @on_fulfill = opts.fetch(:on_fulfill) { Proc.new{ |result| result } }
40
+ @on_reject = opts.fetch(:on_reject) { Proc.new{ |reason| raise reason } }
20
41
 
21
42
  @promise_body = block || Proc.new{|result| result }
22
43
  @state = :unscheduled
@@ -26,14 +47,14 @@ module Concurrent
26
47
  end
27
48
 
28
49
  # @return [Promise]
29
- def self.fulfill(value)
30
- Promise.new.tap { |p| p.send(:synchronized_set_state!, true, value, nil) }
50
+ def self.fulfill(value, opts = {})
51
+ Promise.new(opts).tap{ |p| p.send(:synchronized_set_state!, true, value, nil) }
31
52
  end
32
53
 
33
54
 
34
55
  # @return [Promise]
35
- def self.reject(reason)
36
- Promise.new.tap { |p| p.send(:synchronized_set_state!, false, nil, reason) }
56
+ def self.reject(reason, opts = {})
57
+ Promise.new(opts).tap{ |p| p.send(:synchronized_set_state!, false, nil, reason) }
37
58
  end
38
59
 
39
60
  # @return [Promise]
@@ -51,16 +72,20 @@ module Concurrent
51
72
  end
52
73
 
53
74
  # @since 0.5.0
54
- def self.execute(&block)
55
- new(&block).execute
75
+ def self.execute(opts = {}, &block)
76
+ new(opts, &block).execute
56
77
  end
57
78
 
58
-
59
79
  # @return [Promise] the new promise
60
80
  def then(rescuer = nil, &block)
61
81
  raise ArgumentError.new('rescuers and block are both missing') if rescuer.nil? && !block_given?
62
82
  block = Proc.new{ |result| result } if block.nil?
63
- child = Promise.new(parent: self, on_fulfill: block, on_reject: rescuer)
83
+ child = Promise.new(
84
+ parent: self,
85
+ executor: @executor,
86
+ on_fulfill: block,
87
+ on_reject: rescuer
88
+ )
64
89
 
65
90
  mutex.synchronize do
66
91
  child.state = :pending if @state == :pending
@@ -118,7 +143,7 @@ module Concurrent
118
143
 
119
144
  # @!visibility private
120
145
  def realize(task)
121
- Promise.thread_pool.post do
146
+ @executor.post do
122
147
  success, value, reason = SafeTaskExecutor.new( task ).execute
123
148
 
124
149
  children_to_notify = mutex.synchronize do
@@ -140,6 +165,5 @@ module Concurrent
140
165
  set_state!(success, value, reason)
141
166
  end
142
167
  end
143
-
144
168
  end
145
169
  end
require 'concurrent/ruby_thread_pool_executor'

module Concurrent

  # @!macro cached_thread_pool
  class RubyCachedThreadPool < RubyThreadPoolExecutor

    # Create a new thread pool.
    #
    # @param [Hash] opts the options defining pool behavior.
    # @option opts [Integer] :max_threads (+DEFAULT_MAX_POOL_SIZE+) maximum number
    #   of threads which may be created in the pool
    # @option opts [Integer] :idletime (+DEFAULT_THREAD_IDLETIMEOUT+) maximum
    #   number of seconds a thread may be idle before it is reclaimed
    # @option opts [Symbol] :overflow_policy (+:abort+) the policy applied when
    #   the queue is full; must be one of +OVERFLOW_POLICIES+
    #
    # @raise [ArgumentError] if +max_threads+ is less than or equal to zero
    # @raise [ArgumentError] if +idletime+ is less than or equal to zero
    # @raise [ArgumentError] if +overflow_policy+ is not a known policy
    def initialize(opts = {})
      max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
      idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
      overflow_policy = opts.fetch(:overflow_policy, :abort)

      raise ArgumentError.new('idletime must be greater than zero') if idletime <= 0
      raise ArgumentError.new('max_threads must be greater than zero') if max_length <= 0
      raise ArgumentError.new("#{overflow_policy} is not a valid overflow policy") unless OVERFLOW_POLICIES.include?(overflow_policy)

      # BUG FIX: the validated policy was previously merged under the wrong
      # key (+num_threads: overflow_policy+); it belongs under :overflow_policy.
      opts = opts.merge(
        min_threads: 0,
        max_threads: max_length,
        overflow_policy: overflow_policy,
        idletime: idletime
      )
      super(opts)
    end
  end
end
require 'concurrent/ruby_thread_pool_executor'

module Concurrent

  # @!macro fixed_thread_pool
  class RubyFixedThreadPool < RubyThreadPoolExecutor

    # Create a new thread pool.
    #
    # @param [Integer] num_threads the number of threads to allocate
    # @param [Hash] opts the options defining pool behavior.
    # @option opts [Symbol] :overflow_policy (+:abort+) the overflow policy;
    #   must be one of +OVERFLOW_POLICIES+
    #
    # @raise [ArgumentError] if +num_threads+ is less than or equal to zero
    # @raise [ArgumentError] if +overflow_policy+ is not a known policy
    def initialize(num_threads, opts = {})
      overflow_policy = opts.fetch(:overflow_policy, :abort)

      raise ArgumentError.new('number of threads must be greater than zero') if num_threads < 1
      raise ArgumentError.new("#{overflow_policy} is not a valid overflow policy") unless OVERFLOW_POLICIES.include?(overflow_policy)

      # BUG FIX: the validated policy was previously merged under the wrong
      # key (+num_threads: overflow_policy+); it belongs under :overflow_policy.
      # A fixed pool pins both bounds to num_threads and never reclaims threads.
      opts = opts.merge(
        min_threads: num_threads,
        max_threads: num_threads,
        overflow_policy: overflow_policy,
        idletime: 0
      )
      super(opts)
    end
  end
end
require 'thread'

require 'concurrent/event'
require 'concurrent/ruby_thread_pool_worker'

module Concurrent

  # Raised when a task is posted to a pool whose queue is full and whose
  # overflow policy is +:abort+.
  RejectedExecutionError = Class.new(StandardError) unless defined? RejectedExecutionError

  # @!macro thread_pool_executor
  class RubyThreadPoolExecutor

    # The maximum number of threads that will be created in the pool
    # (unless overridden during construction).
    DEFAULT_MAX_POOL_SIZE = 2**15 # 32768

    # The minimum number of threads that will be created in the pool
    # (unless overridden during construction).
    DEFAULT_MIN_POOL_SIZE = 0

    # Default bound on the task queue; zero means the queue is unbounded.
    DEFAULT_MAX_QUEUE_SIZE = 0

    # The maximum number of seconds a thread in the pool may remain idle before
    # being reclaimed (unless overridden during construction).
    DEFAULT_THREAD_IDLETIMEOUT = 60

    # The set of valid values for the +:overflow_policy+ option.
    OVERFLOW_POLICIES = [:abort, :discard, :caller_runs]

    # The maximum number of threads that may be created in the pool.
    attr_reader :max_length
    # The minimum number of threads kept in the pool.
    attr_reader :min_length

    # The largest number of threads that have ever been in the pool at once.
    attr_reader :largest_length

    # Total number of tasks accepted for execution.
    attr_reader :scheduled_task_count
    # Total number of tasks that have completed.
    attr_reader :completed_task_count

    # Seconds a thread may remain idle before being reclaimed.
    attr_reader :idletime

    # Maximum number of queued tasks; zero means unbounded.
    attr_reader :max_queue

    # The policy applied when a task is posted to a full queue.
    attr_reader :overflow_policy

    # Create a new thread pool.
    #
    # @param [Hash] opts the options defining pool behavior
    # @option opts [Integer] :min_threads (+DEFAULT_MIN_POOL_SIZE+) minimum pool size
    # @option opts [Integer] :max_threads (+DEFAULT_MAX_POOL_SIZE+) maximum pool size
    # @option opts [Integer] :idletime (+DEFAULT_THREAD_IDLETIMEOUT+) idle-thread reclaim timeout
    # @option opts [Integer] :max_queue (+DEFAULT_MAX_QUEUE_SIZE+) queue bound, zero = unbounded
    # @option opts [Symbol] :overflow_policy (+:abort+) one of +OVERFLOW_POLICIES+
    #
    # @raise [ArgumentError] if +:max_threads+ is not positive, +:min_threads+
    #   is negative, or +:overflow_policy+ is not a known policy
    #
    # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html
    def initialize(opts = {})
      @min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i
      @max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
      @idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
      @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i
      @overflow_policy = opts.fetch(:overflow_policy, :abort)

      raise ArgumentError.new('max_threads must be greater than zero') if @max_length <= 0
      raise ArgumentError.new('min_threads cannot be less than zero') if @min_length < 0
      raise ArgumentError.new("#{overflow_policy} is not a valid overflow policy") unless OVERFLOW_POLICIES.include?(@overflow_policy)

      @state = :running
      @pool = []
      @terminator = Event.new
      @queue = Queue.new
      @mutex = Mutex.new
      @scheduled_task_count = 0
      @completed_task_count = 0
      @largest_length = 0

      @gc_interval = opts.fetch(:gc_interval, 1).to_i # undocumented
      # seed the last-GC timestamp in the past so the first post may prune
      @last_gc_time = Time.now.to_f - [1.0, (@gc_interval * 2.0)].max
    end

    # Current number of worker threads; reports zero once shutdown completes.
    def length
      @mutex.synchronize do
        @state != :shutdown ? @pool.length : 0
      end
    end
    alias_method :current_length, :length

    # Number of tasks waiting in the queue.
    def queue_length
      @queue.length
    end

    # Remaining queue slots, or -1 when the queue is unbounded.
    def remaining_capacity
      @mutex.synchronize { @max_queue == 0 ? -1 : @max_queue - @queue.length }
    end

    # Is the thread pool running?
    #
    # @return [Boolean] +true+ when running, +false+ when shutting down or shutdown
    def running?
      @mutex.synchronize { @state == :running }
    end

    # Returns an array with the status of each thread in the pool
    #
    # This method is deprecated and will be removed soon.
    def status
      warn '[DEPRECATED] `status` is deprecated and will be removed soon.'
      @mutex.synchronize { @pool.collect { |worker| worker.status } }
    end

    # Is the thread pool shutdown?
    #
    # @return [Boolean] +true+ when shutdown, +false+ when shutting down or running
    def shutdown?
      @mutex.synchronize { @state != :running }
    end

    # Block until thread pool shutdown is complete or until +timeout+ seconds have
    # passed.
    #
    # @note Does not initiate shutdown or termination. Either +shutdown+ or +kill+
    #   must be called before this method (or on another thread).
    #
    # @param [Numeric] timeout the maximum number of seconds to wait for shutdown to complete
    #
    # @return [Boolean] +true+ if shutdown complete or false on +timeout+
    def wait_for_termination(timeout)
      # BUG FIX: was `timeout.to_i`, which truncated fractional timeouts
      # (e.g. 0.5) to zero; to_f preserves them and is unchanged for integers.
      @terminator.wait(timeout.to_f)
    end

    # Submit a task to the thread pool for asynchronous processing.
    #
    # @param [Array] args zero or more arguments to be passed to the task
    #
    # @yield the asynchronous task to perform
    #
    # @return [Boolean] +true+ if the task is queued, +false+ if the thread pool
    #   is not running
    #
    # @raise [ArgumentError] if no task is given
    def post(*args, &task)
      raise ArgumentError.new('no block given') unless block_given?
      @mutex.synchronize do
        break false unless @state == :running
        return handle_overflow(*args, &task) if @max_queue != 0 && @queue.length >= @max_queue
        @scheduled_task_count += 1
        @queue << [args, task]
        # periodically reclaim dead/idle workers; equivalent to the original
        # `Time.now.to_f - @gc_interval >= @last_gc_time` comparison
        if Time.now.to_f - @last_gc_time >= @gc_interval
          prune_pool
          @last_gc_time = Time.now.to_f
        end
        grow_pool
        true
      end
    end

    # Submit a task to the thread pool for asynchronous processing.
    #
    # @param [Proc] task the asynchronous task to perform
    #
    # @return [self] returns itself
    def <<(task)
      self.post(&task)
      return self
    end

    # Begin an orderly shutdown. Tasks already in the queue will be executed,
    # but no new tasks will be accepted. Has no additional effect if the
    # thread pool is not running.
    def shutdown
      @mutex.synchronize do
        break unless @state == :running
        @queue.clear
        if @pool.empty?
          @state = :shutdown
          @terminator.set
        else
          @state = :shuttingdown
          # post one :stop sentinel per worker so each thread exits
          @pool.length.times{ @queue << :stop }
        end
      end
    end

    # Begin an immediate shutdown. In-progress tasks will be allowed to
    # complete but enqueued tasks will be dismissed and no new tasks
    # will be accepted. Has no additional effect if the thread pool is
    # not running.
    def kill
      @mutex.synchronize do
        break if @state == :shutdown
        @queue.clear
        @state = :shutdown
        drain_pool
        @terminator.set
      end
    end

    # Worker callback: a task finished. @!visibility private
    def on_end_task # :nodoc:
      @mutex.synchronize do
        @completed_task_count += 1 #if success
        break unless @state == :running
      end
    end

    # Worker callback: a worker thread exited; finalize shutdown once the
    # pool drains while not running. @!visibility private
    def on_worker_exit(worker) # :nodoc:
      @mutex.synchronize do
        @pool.delete(worker)
        if @pool.empty? && @state != :running
          @state = :shutdown
          @terminator.set
        end
      end
    end

    protected

    # Apply the configured overflow policy to a rejected task.
    # NOTE(review): with :caller_runs the task executes while @mutex is held
    # (called from inside post's synchronize) — a re-entrant post from the
    # task would deadlock; confirm whether callers rely on this.
    # @!visibility private
    def handle_overflow(*args) # :nodoc:
      case @overflow_policy
      when :abort
        raise RejectedExecutionError
      when :discard
        false
      when :caller_runs
        begin
          yield(*args)
        rescue
          # let it fail
        end
        true
      end
    end

    # Remove dead workers and, when idletime is enabled, workers idle too long.
    # @!visibility private
    def prune_pool # :nodoc:
      @pool.delete_if do |worker|
        worker.dead? ||
          (@idletime == 0 ? false : Time.now.to_f - @idletime > worker.last_activity)
      end
    end

    # Ensure at least min_length workers exist and add one worker when there
    # is queued work and room to grow.
    # @!visibility private
    def grow_pool # :nodoc:
      if @min_length > @pool.length
        additional = @min_length - @pool.length
      elsif @pool.length < @max_length && ! @queue.empty?
        # NOTE: does not take into account idle threads
        additional = 1
      else
        additional = 0
      end
      additional.times do
        break if @pool.length >= @max_length
        @pool << create_worker_thread
      end
      @largest_length = [@largest_length, @pool.length].max
    end

    # Forcibly kill every worker thread (used by #kill).
    # @!visibility private
    def drain_pool # :nodoc:
      @pool.each {|worker| worker.kill }
      @pool.clear
    end

    # Spawn a thread running a RubyThreadPoolWorker; the thread notifies the
    # pool via on_worker_exit when the worker's run loop ends.
    # @!visibility private
    def create_worker_thread # :nodoc:
      wrkr = RubyThreadPoolWorker.new(@queue, self)
      Thread.new(wrkr, self) do |worker, parent|
        Thread.current.abort_on_exception = false
        worker.run
        parent.on_worker_exit(worker)
      end
      return wrkr
    end
  end
end