concurrent-ruby 0.7.0.rc0-x86-mingw32

Files changed (96)
  1. checksums.yaml +15 -0
  2. data/LICENSE.txt +21 -0
  3. data/README.md +166 -0
  4. data/ext/concurrent_ruby_ext/atomic_reference.c +78 -0
  5. data/ext/concurrent_ruby_ext/atomic_reference.h +12 -0
  6. data/ext/concurrent_ruby_ext/extconf.rb +59 -0
  7. data/ext/concurrent_ruby_ext/rb_concurrent.c +28 -0
  8. data/lib/1.9/concurrent_ruby_ext.so +0 -0
  9. data/lib/2.0/concurrent_ruby_ext.so +0 -0
  10. data/lib/concurrent.rb +45 -0
  11. data/lib/concurrent/actress.rb +221 -0
  12. data/lib/concurrent/actress/ad_hoc.rb +20 -0
  13. data/lib/concurrent/actress/context.rb +98 -0
  14. data/lib/concurrent/actress/core.rb +228 -0
  15. data/lib/concurrent/actress/core_delegations.rb +42 -0
  16. data/lib/concurrent/actress/envelope.rb +41 -0
  17. data/lib/concurrent/actress/errors.rb +14 -0
  18. data/lib/concurrent/actress/reference.rb +64 -0
  19. data/lib/concurrent/actress/type_check.rb +48 -0
  20. data/lib/concurrent/agent.rb +232 -0
  21. data/lib/concurrent/async.rb +319 -0
  22. data/lib/concurrent/atomic.rb +46 -0
  23. data/lib/concurrent/atomic/atomic_boolean.rb +157 -0
  24. data/lib/concurrent/atomic/atomic_fixnum.rb +162 -0
  25. data/lib/concurrent/atomic/condition.rb +67 -0
  26. data/lib/concurrent/atomic/copy_on_notify_observer_set.rb +118 -0
  27. data/lib/concurrent/atomic/copy_on_write_observer_set.rb +117 -0
  28. data/lib/concurrent/atomic/count_down_latch.rb +116 -0
  29. data/lib/concurrent/atomic/cyclic_barrier.rb +106 -0
  30. data/lib/concurrent/atomic/event.rb +98 -0
  31. data/lib/concurrent/atomic/thread_local_var.rb +117 -0
  32. data/lib/concurrent/atomic_reference/concurrent_update_error.rb +7 -0
  33. data/lib/concurrent/atomic_reference/delegated_update.rb +28 -0
  34. data/lib/concurrent/atomic_reference/direct_update.rb +28 -0
  35. data/lib/concurrent/atomic_reference/jruby.rb +8 -0
  36. data/lib/concurrent/atomic_reference/mutex_atomic.rb +47 -0
  37. data/lib/concurrent/atomic_reference/numeric_cas_wrapper.rb +24 -0
  38. data/lib/concurrent/atomic_reference/rbx.rb +16 -0
  39. data/lib/concurrent/atomic_reference/ruby.rb +16 -0
  40. data/lib/concurrent/atomics.rb +10 -0
  41. data/lib/concurrent/channel/buffered_channel.rb +85 -0
  42. data/lib/concurrent/channel/channel.rb +41 -0
  43. data/lib/concurrent/channel/unbuffered_channel.rb +34 -0
  44. data/lib/concurrent/channel/waitable_list.rb +40 -0
  45. data/lib/concurrent/channels.rb +5 -0
  46. data/lib/concurrent/collection/blocking_ring_buffer.rb +71 -0
  47. data/lib/concurrent/collection/priority_queue.rb +305 -0
  48. data/lib/concurrent/collection/ring_buffer.rb +59 -0
  49. data/lib/concurrent/collections.rb +3 -0
  50. data/lib/concurrent/configuration.rb +158 -0
  51. data/lib/concurrent/dataflow.rb +91 -0
  52. data/lib/concurrent/delay.rb +112 -0
  53. data/lib/concurrent/dereferenceable.rb +101 -0
  54. data/lib/concurrent/errors.rb +30 -0
  55. data/lib/concurrent/exchanger.rb +34 -0
  56. data/lib/concurrent/executor/cached_thread_pool.rb +44 -0
  57. data/lib/concurrent/executor/executor.rb +229 -0
  58. data/lib/concurrent/executor/fixed_thread_pool.rb +33 -0
  59. data/lib/concurrent/executor/immediate_executor.rb +16 -0
  60. data/lib/concurrent/executor/java_cached_thread_pool.rb +31 -0
  61. data/lib/concurrent/executor/java_fixed_thread_pool.rb +33 -0
  62. data/lib/concurrent/executor/java_single_thread_executor.rb +21 -0
  63. data/lib/concurrent/executor/java_thread_pool_executor.rb +187 -0
  64. data/lib/concurrent/executor/per_thread_executor.rb +24 -0
  65. data/lib/concurrent/executor/ruby_cached_thread_pool.rb +29 -0
  66. data/lib/concurrent/executor/ruby_fixed_thread_pool.rb +32 -0
  67. data/lib/concurrent/executor/ruby_single_thread_executor.rb +73 -0
  68. data/lib/concurrent/executor/ruby_thread_pool_executor.rb +286 -0
  69. data/lib/concurrent/executor/ruby_thread_pool_worker.rb +72 -0
  70. data/lib/concurrent/executor/safe_task_executor.rb +35 -0
  71. data/lib/concurrent/executor/serialized_execution.rb +90 -0
  72. data/lib/concurrent/executor/single_thread_executor.rb +35 -0
  73. data/lib/concurrent/executor/thread_pool_executor.rb +68 -0
  74. data/lib/concurrent/executor/timer_set.rb +143 -0
  75. data/lib/concurrent/executors.rb +9 -0
  76. data/lib/concurrent/future.rb +124 -0
  77. data/lib/concurrent/ivar.rb +111 -0
  78. data/lib/concurrent/logging.rb +17 -0
  79. data/lib/concurrent/mvar.rb +200 -0
  80. data/lib/concurrent/obligation.rb +171 -0
  81. data/lib/concurrent/observable.rb +40 -0
  82. data/lib/concurrent/options_parser.rb +46 -0
  83. data/lib/concurrent/promise.rb +169 -0
  84. data/lib/concurrent/scheduled_task.rb +78 -0
  85. data/lib/concurrent/supervisor.rb +343 -0
  86. data/lib/concurrent/timer_task.rb +341 -0
  87. data/lib/concurrent/tvar.rb +252 -0
  88. data/lib/concurrent/utilities.rb +3 -0
  89. data/lib/concurrent/utility/processor_count.rb +150 -0
  90. data/lib/concurrent/utility/timeout.rb +35 -0
  91. data/lib/concurrent/utility/timer.rb +21 -0
  92. data/lib/concurrent/version.rb +3 -0
  93. data/lib/concurrent_ruby.rb +1 -0
  94. data/lib/concurrent_ruby_ext.so +0 -0
  95. data/lib/extension_helper.rb +9 -0
  96. metadata +142 -0
data/lib/concurrent/executor/ruby_thread_pool_executor.rb
@@ -0,0 +1,286 @@
+ require 'thread'
+
+ require_relative 'executor'
+ require 'concurrent/atomic/event'
+ require 'concurrent/executor/ruby_thread_pool_worker'
+
+ module Concurrent
+
+   # @!macro thread_pool_executor
+   class RubyThreadPoolExecutor
+     include RubyExecutor
+
+     # Default maximum number of threads that will be created in the pool.
+     DEFAULT_MAX_POOL_SIZE = 2**15 # 32768
+
+     # Default minimum number of threads that will be retained in the pool.
+     DEFAULT_MIN_POOL_SIZE = 0
+
+     # Default maximum number of tasks that may be added to the task queue.
+     DEFAULT_MAX_QUEUE_SIZE = 0
+
+     # Default maximum number of seconds a thread in the pool may remain idle
+     # before being reclaimed.
+     DEFAULT_THREAD_IDLETIMEOUT = 60
+
+     # The set of possible overflow policies that may be set at thread pool creation.
+     OVERFLOW_POLICIES = [:abort, :discard, :caller_runs]
+
+     # The maximum number of threads that may be created in the pool.
+     attr_reader :max_length
+
+     # The minimum number of threads that may be retained in the pool.
+     attr_reader :min_length
+
+     # The largest number of threads that have been created in the pool since construction.
+     attr_reader :largest_length
+
+     # The number of tasks that have been scheduled for execution on the pool since construction.
+     attr_reader :scheduled_task_count
+
+     # The number of tasks that have been completed by the pool since construction.
+     attr_reader :completed_task_count
+
+     # The number of seconds that a thread may be idle before being reclaimed.
+     attr_reader :idletime
+
+     # The maximum number of tasks that may be waiting in the work queue at any one time.
+     # When the queue size reaches `max_queue` subsequent tasks will be rejected in
+     # accordance with the configured `overflow_policy`.
+     attr_reader :max_queue
+
+     # The policy defining how rejected tasks (tasks received once the queue size reaches
+     # the configured `max_queue`) are handled. Must be one of the values specified in
+     # `OVERFLOW_POLICIES`.
+     attr_reader :overflow_policy
+
+     # Create a new thread pool.
+     #
+     # @param [Hash] opts the options which configure the thread pool
+     #
+     # @option opts [Integer] :max_threads (DEFAULT_MAX_POOL_SIZE) the maximum
+     #   number of threads to be created
+     # @option opts [Integer] :min_threads (DEFAULT_MIN_POOL_SIZE) the minimum
+     #   number of threads to be retained
+     # @option opts [Integer] :idletime (DEFAULT_THREAD_IDLETIMEOUT) the maximum
+     #   number of seconds a thread may be idle before being reclaimed
+     # @option opts [Integer] :max_queue (DEFAULT_MAX_QUEUE_SIZE) the maximum
+     #   number of tasks allowed in the work queue at any one time; a value of
+     #   zero means the queue may grow without bound
+     # @option opts [Symbol] :overflow_policy (:abort) the policy for handling new
+     #   tasks that are received when the queue size has reached `max_queue`
+     #
+     # @raise [ArgumentError] if `:max_threads` is less than one
+     # @raise [ArgumentError] if `:min_threads` is less than zero
+     # @raise [ArgumentError] if `:overflow_policy` is not one of the values specified
+     #   in `OVERFLOW_POLICIES`
+     #
+     # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html
+     def initialize(opts = {})
+       @min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i
+       @max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
+       @idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
+       @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i
+       @overflow_policy = opts.fetch(:overflow_policy, :abort)
+
+       raise ArgumentError.new('max_threads must be greater than zero') if @max_length <= 0
+       raise ArgumentError.new('min_threads cannot be less than zero') if @min_length < 0
+       raise ArgumentError.new("#{overflow_policy} is not a valid overflow policy") unless OVERFLOW_POLICIES.include?(@overflow_policy)
+
+       init_executor
+
+       @pool = []
+       @queue = Queue.new
+       @scheduled_task_count = 0
+       @completed_task_count = 0
+       @largest_length = 0
+
+       @gc_interval = opts.fetch(:gc_interval, 1).to_i # undocumented
+       @last_gc_time = Time.now.to_f - [1.0, (@gc_interval * 2.0)].max
+     end
+
+     def can_overflow?
+       @max_queue != 0
+     end
+
+     # The number of threads currently in the pool.
+     #
+     # @return [Integer] the length
+     def length
+       mutex.synchronize{ running? ? @pool.length : 0 }
+     end
+     alias_method :current_length, :length
+
+     # The number of tasks in the queue awaiting execution.
+     #
+     # @return [Integer] the queue_length
+     def queue_length
+       mutex.synchronize{ running? ? @queue.length : 0 }
+     end
+
+     # Number of tasks that may be enqueued before reaching `max_queue` and rejecting
+     # new tasks. A value of -1 indicates that the queue may grow without bound.
+     #
+     # @return [Integer] the remaining_capacity
+     def remaining_capacity
+       mutex.synchronize { @max_queue == 0 ? -1 : @max_queue - @queue.length }
+     end
+
+     # Returns an array with the status of each thread in the pool.
+     #
+     # This method is deprecated and will be removed soon.
+     def status
+       warn '[DEPRECATED] `status` is deprecated and will be removed soon.'
+       mutex.synchronize { @pool.collect { |worker| worker.status } }
+     end
+
+     # Run on task completion.
+     #
+     # @!visibility private
+     def on_end_task
+       mutex.synchronize do
+         @completed_task_count += 1 #if success
+         break unless running?
+       end
+     end
+
+     # Run when a thread worker exits.
+     #
+     # @!visibility private
+     def on_worker_exit(worker)
+       mutex.synchronize do
+         @pool.delete(worker)
+         if @pool.empty? && ! running?
+           stop_event.set
+           stopped_event.set
+         end
+       end
+     end
+
+     protected
+
+     # @!visibility private
+     def execute(*args, &task)
+       prune_pool
+       if ensure_capacity?
+         @scheduled_task_count += 1
+         @queue << [args, task]
+       else
+         handle_overflow(*args, &task) if @max_queue != 0 && @queue.length >= @max_queue
+       end
+     end
+
+     # @!visibility private
+     def shutdown_execution
+       if @pool.empty?
+         stopped_event.set
+       else
+         @pool.length.times{ @queue << :stop }
+       end
+     end
+
+     # @!visibility private
+     def kill_execution
+       @queue.clear
+       drain_pool
+     end
+
+     # Check the thread pool configuration and determine if the pool
+     # has enough capacity to handle the request. Will grow the size
+     # of the pool if necessary.
+     #
+     # @return [Boolean] true if the pool has enough capacity else false
+     #
+     # @!visibility private
+     def ensure_capacity?
+       additional = 0
+       capacity = true
+
+       if @pool.size < @min_length
+         additional = @min_length - @pool.size
+       elsif @queue.empty? && @queue.num_waiting >= 1
+         additional = 0
+       elsif @pool.size == 0 && @min_length == 0
+         additional = 1
+       elsif @pool.size < @max_length || @max_length == 0
+         additional = 1
+       elsif @max_queue == 0 || @queue.size < @max_queue
+         additional = 0
+       else
+         capacity = false
+       end
+
+       additional.times do
+         @pool << create_worker_thread
+       end
+
+       if additional > 0
+         @largest_length = [@largest_length, @pool.length].max
+       end
+
+       capacity
+     end
+
+     # Handler which executes the `overflow_policy` once the queue size
+     # reaches `max_queue`.
+     #
+     # @param [Array] args the arguments to the task which is being handled.
+     #
+     # @!visibility private
+     def handle_overflow(*args)
+       case @overflow_policy
+       when :abort
+         raise RejectedExecutionError
+       when :discard
+         false
+       when :caller_runs
+         begin
+           yield(*args)
+         rescue => ex
+           # let it fail
+           log DEBUG, ex
+         end
+         true
+       end
+     end
+
+     # Scan all threads in the pool and reclaim any that are dead or
+     # have been idle too long. Will check the last time the pool was
+     # pruned and only run if the configured garbage collection
+     # interval has passed.
+     #
+     # @!visibility private
+     def prune_pool
+       if Time.now.to_f - @gc_interval >= @last_gc_time
+         @pool.delete_if do |worker|
+           worker.dead? ||
+             (@idletime == 0 ? false : Time.now.to_f - @idletime > worker.last_activity)
+         end
+         @last_gc_time = Time.now.to_f
+       end
+     end
+
+     # Reclaim all threads in the pool.
+     #
+     # @!visibility private
+     def drain_pool
+       @pool.each {|worker| worker.kill }
+       @pool.clear
+     end
+
+     # Create a single worker thread to be added to the pool.
+     #
+     # @return [Thread] the new thread.
+     #
+     # @!visibility private
+     def create_worker_thread
+       wrkr = RubyThreadPoolWorker.new(@queue, self)
+       Thread.new(wrkr, self) do |worker, parent|
+         Thread.current.abort_on_exception = false
+         worker.run
+         parent.on_worker_exit(worker)
+       end
+       return wrkr
+     end
+   end
+ end
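For orientation, a minimal usage sketch of the pool defined above. It assumes the `post`, `shutdown`, and `wait_for_termination` methods supplied by the included `RubyExecutor` module (not shown in this diff); in application code the portable `Concurrent::ThreadPoolExecutor` wrapper would normally be used instead of this Ruby-specific class:

    require 'concurrent'

    # a bounded pool: at most 5 threads, at most 100 queued tasks,
    # and RejectedExecutionError raised once the queue is full
    pool = Concurrent::RubyThreadPoolExecutor.new(
      min_threads:     1,
      max_threads:     5,
      max_queue:       100,
      overflow_policy: :abort
    )

    # arguments given to post are passed through to the block by the worker
    10.times { |i| pool.post(i) { |n| puts "task #{n}" } }

    pool.shutdown              # stop accepting work, let the queue drain
    pool.wait_for_termination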
data/lib/concurrent/executor/ruby_thread_pool_worker.rb
@@ -0,0 +1,72 @@
+ require 'thread'
+ require 'concurrent/logging'
+
+ module Concurrent
+
+   # @!visibility private
+   class RubyThreadPoolWorker
+     include Logging
+
+     # @!visibility private
+     def initialize(queue, parent)
+       @queue = queue
+       @parent = parent
+       @mutex = Mutex.new
+       @last_activity = Time.now.to_f
+     end
+
+     # @!visibility private
+     def dead?
+       return @mutex.synchronize do
+         @thread.nil? ? false : ! @thread.alive?
+       end
+     end
+
+     # @!visibility private
+     def last_activity
+       @mutex.synchronize { @last_activity }
+     end
+
+     def status
+       @mutex.synchronize do
+         return 'not running' if @thread.nil?
+         @thread.status
+       end
+     end
+
+     # @!visibility private
+     def kill
+       @mutex.synchronize do
+         Thread.kill(@thread) unless @thread.nil?
+         @thread = nil
+       end
+     end
+
+     # @!visibility private
+     def run(thread = Thread.current)
+       @mutex.synchronize do
+         raise StandardError.new('already running') unless @thread.nil?
+         @thread = thread
+       end
+
+       loop do
+         task = @queue.pop
+         if task == :stop
+           @thread = nil
+           @parent.on_worker_exit(self)
+           break
+         end
+
+         begin
+           task.last.call(*task.first)
+         rescue => ex
+           # let it fail
+           log DEBUG, ex
+         ensure
+           @last_activity = Time.now.to_f
+           @parent.on_end_task
+         end
+       end
+     end
+   end
+ end
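The pool and its workers communicate over a plain `Queue`: `execute` pushes each unit of work as an `[args, task]` pair and `shutdown_execution` pushes the `:stop` symbol once per worker as a shutdown sentinel. A standalone sketch of that protocol using only the standard library (not part of the gem):

    require 'thread'

    queue = Queue.new
    queue << [[21], ->(n) { puts n * 2 }]   # an [args, task] pair, as pushed by execute
    queue << :stop                          # shutdown sentinel, as pushed by shutdown_execution

    worker = Thread.new do
      loop do
        item = queue.pop
        break if item == :stop
        args, task = item
        task.call(*args)                    # prints 42
      end
    end
    worker.join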
data/lib/concurrent/executor/safe_task_executor.rb
@@ -0,0 +1,35 @@
+ require 'thread'
+
+ module Concurrent
+
+   # A simple utility class that executes a callable and returns an array of three elements:
+   # success - indicating if the callable has been executed without errors
+   # value - filled by the callable result if it has been executed without errors, nil otherwise
+   # reason - the error raised by the callable if it has been executed with errors, nil otherwise
+   class SafeTaskExecutor
+
+     def initialize(task, opts = {})
+       @task = task
+       @mutex = Mutex.new
+       @exception_class = opts.fetch(:rescue_exception, false) ? Exception : StandardError
+     end
+
+     # @return [Array]
+     def execute(*args)
+       @mutex.synchronize do
+         success = false
+         value = reason = nil
+
+         begin
+           value = @task.call(*args)
+           success = true
+         rescue @exception_class => ex
+           reason = ex
+           success = false
+         end
+
+         [success, value, reason]
+       end
+     end
+   end
+ end
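A short usage sketch of `SafeTaskExecutor` as defined above; the three-element return array and the `:rescue_exception` option are taken directly from the code, while the `require 'concurrent'` entry point is assumed:

    require 'concurrent'

    # a task that succeeds: returns [true, 42, nil]
    success, value, reason = Concurrent::SafeTaskExecutor.new(-> { 42 }).execute

    # a task that raises: returns [false, nil, #<RuntimeError: boom>]
    success, value, reason = Concurrent::SafeTaskExecutor.new(-> { raise 'boom' }).execute

    # rescue_exception: true widens the rescue clause from StandardError to Exception
    strict = Concurrent::SafeTaskExecutor.new(-> { raise NotImplementedError }, rescue_exception: true)
    success, value, reason = strict.execute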
data/lib/concurrent/executor/serialized_execution.rb
@@ -0,0 +1,90 @@
+ require 'concurrent/logging'
+
+ module Concurrent
+
+   # Ensures that posted jobs are executed in a serialized order, never running at the same time.
+   class SerializedExecution
+     include Logging
+
+     Job = Struct.new(:executor, :args, :block) do
+       def call
+         block.call *args
+       end
+     end
+
+     def initialize
+       @being_executed = false
+       @stash = []
+       @mutex = Mutex.new
+     end
+
+     # Submit a task to the executor for asynchronous processing.
+     #
+     # @param [Executor] executor to be used for this job
+     #
+     # @param [Array] args zero or more arguments to be passed to the task
+     #
+     # @yield the asynchronous task to perform
+     #
+     # @return [Boolean] `true` if the task is queued, `false` if the executor
+     #   is not running
+     #
+     # @raise [ArgumentError] if no task is given
+     def post(executor, *args, &task)
+       return nil if task.nil?
+
+       job = Job.new executor, args, task
+
+       begin
+         @mutex.lock
+         post = if @being_executed
+                  @stash << job
+                  false
+                else
+                  @being_executed = true
+                end
+       ensure
+         @mutex.unlock
+       end
+
+       call_job job if post
+       true
+     end
+
+     private
+
+     def call_job(job)
+       did_it_run = begin
+         job.executor.post { work(job) }
+         true
+       rescue RejectedExecutionError => ex
+         false
+       end
+
+       # TODO not the best idea to run it myself
+       unless did_it_run
+         begin
+           work job
+         rescue => ex
+           # let it fail
+           log DEBUG, ex
+         end
+       end
+     end
+
+     # ensures next job is executed if any is stashed
+     def work(job)
+       job.call
+     ensure
+       begin
+         @mutex.lock
+         job = @stash.shift || (@being_executed = false)
+       ensure
+         @mutex.unlock
+       end
+
+       call_job job if job
+     end
+
+   end
+ end
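A sketch of how `SerializedExecution` might be combined with the thread pool from earlier in this diff so that jobs posted through it never overlap, even though the underlying pool is concurrent. It assumes both classes are loaded by `require 'concurrent'` in this release and that the pool responds to `post` via the included `RubyExecutor` module:

    require 'concurrent'

    pool       = Concurrent::RubyThreadPoolExecutor.new(max_threads: 4)
    serializer = Concurrent::SerializedExecution.new

    5.times do |i|
      # each job runs on the pool, but strictly one at a time and in submission order
      serializer.post(pool) { puts "job #{i}" }
    end

    pool.shutdown
    pool.wait_for_termination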