concurrent-ruby 0.9.0.pre2-java → 0.9.0.pre3-java
- checksums.yaml +4 -4
- data/CHANGELOG.md +8 -3
- data/lib/concurrent.rb +56 -0
- data/lib/concurrent/async.rb +7 -29
- data/lib/concurrent/atom.rb +2 -42
- data/lib/concurrent/atomic_reference/direct_update.rb +31 -3
- data/lib/concurrent/atomic_reference/mutex_atomic.rb +7 -7
- data/lib/concurrent/configuration.rb +10 -13
- data/lib/concurrent/edge.rb +4 -4
- data/lib/concurrent/errors.rb +12 -10
- data/lib/concurrent/executor/cached_thread_pool.rb +41 -36
- data/lib/concurrent/executor/fixed_thread_pool.rb +40 -47
- data/lib/concurrent/executor/thread_pool_executor.rb +1 -0
- data/lib/concurrent/ivar.rb +1 -1
- data/lib/concurrent/timer_task.rb +5 -3
- data/lib/concurrent/version.rb +2 -2
- data/lib/concurrent_ruby_ext.jar +0 -0
- metadata +5 -9
- data/lib/concurrent/executor/java_cached_thread_pool.rb +0 -34
- data/lib/concurrent/executor/java_fixed_thread_pool.rb +0 -24
- data/lib/concurrent/executor/ruby_cached_thread_pool.rb +0 -20
- data/lib/concurrent/executor/ruby_fixed_thread_pool.rb +0 -21
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 10a5012e42fee5380405d4af4afcbc1057bdda26
+  data.tar.gz: d3a355604220a071410631424fc05288df4a67e6
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 3e3bfd78da01a87137ad705c28a0503f057689e64614d4e22fcea1a79d7e29f041a90d36d0dceb16937fa17e8dc66542a4a528ebaee11d0a05ab0c7a901049f8
+  data.tar.gz: 41308b6e724dfa7dbc4a74f979b748b62aa475c8396e7e46234e2c0214910aafb253112f9c413934f97e2123c8831ad394d34cf8c6be251262420ee24b387bca
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,10 @@
 ### Next Release v0.9.0 (Target Date: 7 June 2015)
 
+
+* Updated `AtomicReference`
+  - `AtomicReference#try_update` now simply returns instead of raising exception
+  - `AtomicReference#try_update!` was added to raise exceptions if an update
+    fails. Note: this is the same behavior as the old `try_update`
 * Pure Java implementations of
   - `AtomicBoolean`
   - `AtomicFixnum`
@@ -58,7 +63,7 @@
   - `Channel`
   - `Exchanger`
   - `LazyRegister`
-- **new Future Framework** <http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Edge.html> - unified
+- **new Future Framework** <http://ruby-concurrency.github.io/concurrent-ruby/Concurrent/Edge.html> - unified
   implementation of Futures and Promises which combines Features of previous `Future`,
   `Promise`, `IVar`, `Event`, `Probe`, `dataflow`, `Delay`, `TimerTask` into single framework. It uses extensively
   new synchronization layer to make all the paths **lock-free** with exception of blocking threads on `#wait`.
@@ -80,7 +85,7 @@
   - Add AbstractContext#default_executor to be able to override executor class wide
   - Add basic IO example
   - Documentation somewhat improved
-  - All messages should have same priority. It's now possible to send `actor << job1 << job2 << :terminate!` and
+  - All messages should have same priority. It's now possible to send `actor << job1 << job2 << :terminate!` and
     be sure that both jobs are processed first.
 * Refactored `Channel` to use newer synchronization objects
 * Added `#reset` and `#cancel` methods to `TimerSet`
@@ -162,7 +167,7 @@ Please see the [roadmap](https://github.com/ruby-concurrency/concurrent-ruby/iss
   - `SerializedExecutionDelegator` for serializing *any* executor
 * Updated `Async` with serialized execution
 * Updated `ImmediateExecutor` and `PerThreadExecutor` with full executor service lifecycle
-* Added a `Delay` to root `Actress` initialization
+* Added a `Delay` to root `Actress` initialization
 * Minor bug fixes to thread pools
 * Refactored many intermittently failing specs
 * Removed Java interop warning `executor.rb:148 warning: ambiguous Java methods found, using submit(java.lang.Runnable)`
data/lib/concurrent.rb
CHANGED
@@ -49,6 +49,62 @@ require 'concurrent/tvar'
 #
 # @see http://linux.die.net/man/3/clock_gettime Linux clock_gettime(3)
 
+# @!macro [new] copy_options
+#
+#   ## Copy Options
+#
+#   Object references in Ruby are mutable. This can lead to serious
+#   problems when the {#value} of an object is a mutable reference, which
+#   is always the case unless the value is a `Fixnum`, `Symbol`, or similar
+#   "primitive" data type. Each instance can be configured with a few
+#   options that can help protect the program from potentially dangerous
+#   operations. Each of these options can be optionally set when the object
+#   instance is created:
+#
+#   * `:dup_on_deref` When true the object will call the `#dup` method on
+#     the `value` object every time the `#value` method is called
+#     (default: false)
+#   * `:freeze_on_deref` When true the object will call the `#freeze`
+#     method on the `value` object every time the `#value` method is called
+#     (default: false)
+#   * `:copy_on_deref` When given a `Proc` object the `Proc` will be run
+#     every time the `#value` method is called. The `Proc` will be given
+#     the current `value` as its only argument and the result returned by
+#     the block will be the return value of the `#value` call. When `nil`
+#     this option will be ignored (default: nil)
+#
+#   When multiple deref options are set the order of operations is strictly defined.
+#   The order of deref operations is:
+#   * `:copy_on_deref`
+#   * `:dup_on_deref`
+#   * `:freeze_on_deref`
+#
+#   Because of this ordering there is no need to `#freeze` an object created by a
+#   provided `:copy_on_deref` block. Simply set `:freeze_on_deref` to `true`.
+#   Setting both `:dup_on_deref` to `true` and `:freeze_on_deref` to `true` is
+#   as close to the behavior of a "pure" functional language (like Erlang, Clojure,
+#   or Haskell) as we are likely to get in Ruby.
+
+# @!macro [attach] deref_options
+#
+#   @option opts [Boolean] :dup_on_deref (false) Call `#dup` before
+#     returning the data from {#value}
+#   @option opts [Boolean] :freeze_on_deref (false) Call `#freeze` before
+#     returning the data from {#value}
+#   @option opts [Proc] :copy_on_deref (nil) When calling the {#value}
+#     method, call the given proc passing the internal value as the sole
+#     argument then return the new value returned from the proc.
+
+# @!macro [attach] executor_and_deref_options
+#
+#   @param [Hash] opts the options used to define the behavior at update and deref
+#     and to specify the executor on which to perform actions
+#   @option opts [Executor] :executor when set use the given `Executor` instance.
+#     Three special values are also supported: `:task` returns the global task pool,
+#     `:operation` returns the global operation pool, and `:immediate` returns a new
+#     `ImmediateExecutor` object.
+#   @!macro deref_options
+
 # Modern concurrency tools for Ruby. Inspired by Erlang, Clojure, Scala, Haskell,
 # F#, C#, Java, and classic concurrency patterns.
 #
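For readers unfamiliar with these options, here is a small illustrative sketch (not taken from the diff) using `Concurrent::Future`, one of the classes that accepts the same `:dup_on_deref`, `:freeze_on_deref`, and `:copy_on_deref` options:

    require 'concurrent'

    # Every #value call returns a frozen, fresh copy of the result, so callers
    # cannot mutate the array shared inside the future.
    future = Concurrent::Future.execute(dup_on_deref: true, freeze_on_deref: true) do
      [1, 2, 3]
    end

    future.value.frozen?               #=> true
    future.value.equal?(future.value)  #=> false; each deref returns a new dup

    # :copy_on_deref runs the given Proc on the raw value before returning it.
    deep = Concurrent::Future.execute(copy_on_deref: ->(v) { Marshal.load(Marshal.dump(v)) }) do
      { 'status' => 'ok' }
    end
    deep.value                         #=> a deep copy of the inner hash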
data/lib/concurrent/async.rb
CHANGED
@@ -5,6 +5,7 @@ require 'concurrent/errors'
 require 'concurrent/ivar'
 require 'concurrent/executor/immediate_executor'
 require 'concurrent/executor/serialized_execution'
+require 'concurrent/concern/deprecation'
 
 module Concurrent
 
@@ -30,23 +31,12 @@ module Concurrent
 # object. The former method allows methods to be called asynchronously by posting
 # to the global thread pool. The latter allows a method to be called synchronously
 # on the current thread but does so safely with respect to any pending asynchronous
-# method calls. Both methods return an `
+# method calls. Both methods return an `IVar` which can be inspected for
 # the result of the method call. Calling a method with `async` will return a
-# `:pending` `
+# `:pending` `IVar` whereas `await` will return a `:complete` `IVar`.
 #
 # Very loosely based on the `async` and `await` keywords in C#.
 #
-# ### An Important Note About Initialization
-#
-# > This module depends on several internal synchronization mechanisms that
-# > must be initialized prior to calling any of the async/await/executor methods.
-# > To ensure thread-safe initialization the class `new` method will be made
-# > private when the `Concurrent::Async` module is included. A factory method
-# > called `create` will be defined in its place. The `create` factory will
-# > create a new object instance, passing all arguments to the constructor,
-# > and will initialize all synchronization mechanisms. This is the only way
-# > thread-safe initialization can be guaranteed.
-#
 # ### An Important Note About Thread Safe Guarantees
 #
 # > Thread safe guarantees can only be made when asynchronous method calls
@@ -70,23 +60,19 @@ module Concurrent
 #     nil
 #   end
 # end
-#
-# horn = Echo.new #=> NoMethodError: private method `new' called for Echo:Class
 #
-# horn = Echo.
+# horn = Echo.new
 # horn.echo('zero')      # synchronous, not thread-safe
 #
 # horn.async.echo('one') # asynchronous, non-blocking, thread-safe
 # horn.await.echo('two') # synchronous, blocking, thread-safe
 #
-# @see Concurrent::Concern::Obligation
 # @see Concurrent::IVar
 module Async
 
-  # @!method self.
+  # @!method self.new(*args, &block)
   #
-  #
-  #   class. Used instead of `new` to ensure proper initialization of the
+  #   Instantiate a new object and ensure proper initialization of the
   #   synchronization mechanisms.
   #
   #   @param [Array<Object>] args Zero or more arguments to be passed to the
@@ -129,21 +115,13 @@ module Concurrent
   # @!visibility private
   def self.included(base)
     base.singleton_class.send(:alias_method, :original_new, :new)
-    base.send(:private_class_method, :original_new)
     base.extend(ClassMethods)
     super(base)
   end
 
   # @!visibility private
   module ClassMethods
-
-    # @deprecated
     def new(*args, &block)
-      warn '[DEPRECATED] use the `create` method instead'
-      create(*args, &block)
-    end
-
-    def create(*args, &block)
       obj = original_new(*args, &block)
       obj.send(:init_synchronization)
       obj
@@ -280,7 +258,7 @@ module Concurrent
   # @!visibility private
   # @deprecated
   def init_mutex
-
+    deprecated 'mutex synchronization now happens automatically'
     init_synchronization
   rescue InitializationError
     # suppress
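To illustrate the restored behavior, the `Echo` class from the comments above can now be instantiated with a plain `new` (illustrative sketch, not part of the diff):

    require 'concurrent'

    class Echo
      include Concurrent::Async

      def echo(msg)
        print "#{msg}\n"
        msg
      end
    end

    horn = Echo.new                  # no more private `new` or `create` factory

    pending = horn.async.echo('one') # returns a :pending IVar immediately
    pending.value                    # blocks until the call has run; => 'one'

    done = horn.await.echo('two')    # runs on the caller's thread
    done.complete?                   #=> true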
data/lib/concurrent/atom.rb
CHANGED
@@ -18,40 +18,7 @@ module Concurrent
   # new value to the result of running the given block if and only if that
   # value validates.
   #
-  # @!macro
-  #   ## Copy Options
-  #
-  #   Object references in Ruby are mutable. This can lead to serious
-  #   problems when the {#value} of an object is a mutable reference, which
-  #   is always the case unless the value is a `Fixnum`, `Symbol`, or similar
-  #   "primitive" data type. Each instance can be configured with a few
-  #   options that can help protect the program from potentially dangerous
-  #   operations. Each of these options can be optionally set when the object
-  #   instance is created:
-  #
-  #   * `:dup_on_deref` When true the object will call the `#dup` method on
-  #     the `value` object every time the `#value` method is called
-  #     (default: false)
-  #   * `:freeze_on_deref` When true the object will call the `#freeze`
-  #     method on the `value` object every time the `#value` method is called
-  #     (default: false)
-  #   * `:copy_on_deref` When given a `Proc` object the `Proc` will be run
-  #     every time the `#value` method is called. The `Proc` will be given
-  #     the current `value` as its only argument and the result returned by
-  #     the block will be the return value of the `#value` call. When `nil`
-  #     this option will be ignored (default: nil)
-  #
-  #   When multiple deref options are set the order of operations is strictly defined.
-  #   The order of deref operations is:
-  #   * `:copy_on_deref`
-  #   * `:dup_on_deref`
-  #   * `:freeze_on_deref`
-  #
-  #   Because of this ordering there is no need to `#freeze` an object created by a
-  #   provided `:copy_on_deref` block. Simply set `:freeze_on_deref` to `true`.
-  #   Setting both `:dup_on_deref` to `true` and `:freeze_on_deref` to `true` is
-  #   as close to the behavior of a "pure" functional language (like Erlang, Clojure,
-  #   or Haskell) as we are likely to get in Ruby.
+  # @!macro copy_options
   #
   # @see http://clojure.org/atoms Clojure Atoms
   class Atom < Synchronization::Object
@@ -66,14 +33,7 @@ module Concurrent
     # intended new value. The validator will return true if the new value
     # is acceptable else return false (preferably) or raise an exception.
     #
-    # @!macro
-    #   @option opts [Boolean] :dup_on_deref (false) Call `#dup` before
-    #     returning the data from {#value}
-    #   @option opts [Boolean] :freeze_on_deref (false) Call `#freeze` before
-    #     returning the data from {#value}
-    #   @option opts [Proc] :copy_on_deref (nil) When calling the {#value}
-    #     method, call the given proc passing the internal value as the sole
-    #     argument then return the new value returned from the proc.
+    # @!macro deref_options
     #
     # @raise [ArgumentError] if the validator is not a `Proc` (when given)
     def initialize(value, opts = {})
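To ground the macros referenced above, a minimal `Atom` sketch with a validator (illustrative, not from the diff):

    require 'concurrent'

    # A counter whose validator rejects negative values.
    counter = Concurrent::Atom.new(0, validator: ->(v) { v >= 0 })

    counter.swap { |v| v + 1 }       # retries on contention; value is now 1
    counter.compare_and_set(1, 5)    #=> true; value is now 5
    counter.swap { |v| v - 10 }      # -5 fails validation, so the value stays 5
    counter.value                    #=> 5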
data/lib/concurrent/atomic_reference/direct_update.rb
CHANGED
@@ -13,7 +13,7 @@ module Concurrent
   # Pass the current value to the given block, replacing it
   # with the block's result. May retry if the value changes
   # during the block's execution.
-  #
+  #
   # @yield [Object] Calculate a new value for the atomic reference using
   #   given (old) value
   # @yieldparam [Object] old_value the starting value of the atomic reference
@@ -27,17 +27,45 @@ module Concurrent
   # @!macro [attach] atomic_reference_method_try_update
   #
   #   Pass the current value to the given block, replacing it
+  #   with the block's result. Return nil if the update fails.
+  #
+  #   @yield [Object] Calculate a new value for the atomic reference using
+  #     given (old) value
+  #   @yieldparam [Object] old_value the starting value of the atomic reference
+  #
+  #   @note This method was altered to avoid raising an exception by default.
+  #     Instead, this method now returns `nil` in case of failure. For more info,
+  #     please see: https://github.com/ruby-concurrency/concurrent-ruby/pull/336
+  #
+  #   @return [Object] the new value, or nil if update failed
+  def try_update
+    old_value = get
+    new_value = yield old_value
+
+    return unless compare_and_set old_value, new_value
+
+    new_value
+  end
+
+  # @!macro [attach] atomic_reference_method_try_update!
+  #
+  #   Pass the current value to the given block, replacing it
   #   with the block's result. Raise an exception if the update
   #   fails.
-  #
+  #
   #   @yield [Object] Calculate a new value for the atomic reference using
   #     given (old) value
   #   @yieldparam [Object] old_value the starting value of the atomic reference
   #
+  #   @note This behavior mimics the behavior of the original
+  #     `AtomicReference#try_update` API. The reason this was changed was to
+  #     avoid raising exceptions (which are inherently slow) by default. For more
+  #     info: https://github.com/ruby-concurrency/concurrent-ruby/pull/336
+  #
   #   @return [Object] the new value
   #
   #   @raise [Concurrent::ConcurrentUpdateError] if the update fails
-  def try_update
+  def try_update!
     old_value = get
     new_value = yield old_value
     unless compare_and_set(old_value, new_value)
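An illustrative sketch of the new split between the two methods (not part of the diff):

    require 'concurrent'

    ref = Concurrent::AtomicReference.new(0)

    # try_update returns the new value, or nil when the compare-and-set loses a race.
    ref.try_update { |v| v + 1 }    #=> 1

    # try_update! keeps the old behavior and raises on failure.
    begin
      ref.try_update! { |v| v + 1 }
    rescue Concurrent::ConcurrentUpdateError
      # another thread changed the reference between the read and the CAS
    end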
data/lib/concurrent/atomic_reference/mutex_atomic.rb
CHANGED
@@ -42,14 +42,14 @@ module Concurrent
 
     # @!macro atomic_reference_method_compare_and_set
     def _compare_and_set(old_value, new_value)
-
-
-
-
-
-
+      @mutex.synchronize do
+        if @value.equal? old_value
+          @value = new_value
+          true
+        else
+          false
+        end
       end
-      true
     end
   end
 end
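The mutex-based fallback above honors the same compare-and-set contract as the native implementations; a brief usage sketch (illustrative only):

    require 'concurrent'

    state = Concurrent::AtomicReference.new(:idle)

    state.compare_and_set(:idle, :running)   #=> true;  the value is now :running
    state.compare_and_set(:idle, :stopped)   #=> false; the expected value no longer matches
    state.get                                #=> :running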
data/lib/concurrent/configuration.rb
CHANGED
@@ -73,11 +73,10 @@ module Concurrent
     end
   end
 
-  # Disables AtExit
-  # When disabled it will be the application
-  #
-  #
-  # by calling {AtExit.run} method.
+  # Disables AtExit handlers including pool auto-termination handlers.
+  # When disabled it will be the application programmer's responsibility
+  # to ensure that the handlers are shutdown properly prior to application
+  # exit by calling {AtExit.run} method.
   #
   # @note this option should be needed only because of `at_exit` ordering
   #   issues which may arise when running some of the testing frameworks.
@@ -88,13 +87,13 @@ module Concurrent
   # from within a gem. It should *only* be used from within the main
   # application and even then it should be used only when necessary.
   # @see AtExit
-  def self.
+  def self.disable_at_exit_handlers!
     AtExit.enabled = false
   end
 
   def self.disable_executor_auto_termination!
-    deprecated_method 'disable_executor_auto_termination!', '
-
+    deprecated_method 'disable_executor_auto_termination!', 'disable_at_exit_handlers!'
+    disable_at_exit_handlers!
   end
 
   # @return [true,false]
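A short sketch of how the renamed method is intended to be used (illustrative; only call it from the top-level application, never from a gem):

    require 'concurrent'

    Concurrent.disable_at_exit_handlers!   # opt out of the library's at_exit hooks

    # The application is now responsible for running the handlers itself
    # before it exits, as the documentation above describes:
    Concurrent::AtExit.run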
@@ -132,8 +131,6 @@ module Concurrent
   # Global thread pool used for global *timers*.
   #
   # @return [Concurrent::TimerSet] the thread pool
-  #
-  # @see Concurrent::timer
   def self.global_timer_set
     GLOBAL_TIMER_SET.value
   end
@@ -249,10 +246,10 @@ module Concurrent
       Concurrent.new_fast_executor
     end
 
-    # @deprecated Use Concurrent.
+    # @deprecated Use Concurrent.disable_executor_auto_termination! instead
     def auto_terminate=(value)
-      deprecated_method 'Concurrent.configuration.auto_terminate=', 'Concurrent.
-      Concurrent.
+      deprecated_method 'Concurrent.configuration.auto_terminate=', 'Concurrent.disable_executor_auto_termination!'
+      Concurrent.disable_executor_auto_termination! if !value
     end
 
     # @deprecated Use Concurrent.auto_terminate_global_executors? instead
data/lib/concurrent/edge.rb
CHANGED
@@ -21,10 +21,10 @@ module Concurrent
   #
   # @!macro [attach] edge_warning
   #   @api Edge
-  #   @note **Edge Feature:** Edge features are under active development and may
-  #
-  #
+  #   @note **Edge Feature:** Edge features are under active development and may change frequently. They are expected not to
+  #     keep backward compatibility (they may also lack tests and documentation). Semantic versions will
+  #     be obeyed though. Features developed in `concurrent-ruby-edge` are expected to move
+  #     to `concurrent-ruby` when final.
   module Edge
-
   end
 end
data/lib/concurrent/errors.rb
CHANGED
@@ -1,40 +1,42 @@
 module Concurrent
 
+  Error = Class.new(StandardError)
+
   # Raised when errors occur during configuration.
-  ConfigurationError = Class.new(
+  ConfigurationError = Class.new(Error)
 
   # Raised when an asynchronous operation is cancelled before execution.
-  CancelledOperationError = Class.new(
+  CancelledOperationError = Class.new(Error)
 
   # Raised when a lifecycle method (such as `stop`) is called in an improper
   # sequence or when the object is in an inappropriate state.
-  LifecycleError = Class.new(
+  LifecycleError = Class.new(Error)
 
   # Raised when an attempt is made to violate an immutability guarantee.
-  ImmutabilityError = Class.new(
+  ImmutabilityError = Class.new(Error)
 
   # Raised when an object's methods are called when it has not been
   # properly initialized.
-  InitializationError = Class.new(
+  InitializationError = Class.new(Error)
 
   # Raised when an object with a start/stop lifecycle has been started an
   # excessive number of times. Often used in conjunction with a restart
   # policy or strategy.
-  MaxRestartFrequencyError = Class.new(
+  MaxRestartFrequencyError = Class.new(Error)
 
   # Raised when an attempt is made to modify an immutable object
   # (such as an `IVar`) after its final state has been set.
-  MultipleAssignmentError = Class.new(
+  MultipleAssignmentError = Class.new(Error)
 
   # Raised by an `Executor` when it is unable to process a given task,
   # possibly because of a reject policy or other internal error.
-  RejectedExecutionError = Class.new(
+  RejectedExecutionError = Class.new(Error)
 
   # Raised when any finite resource, such as a lock counter, exceeds its
   # maximum limit/threshold.
-  ResourceLimitError = Class.new(
+  ResourceLimitError = Class.new(Error)
 
   # Raised when an operation times out.
-  TimeoutError = Class.new(
+  TimeoutError = Class.new(Error)
 
 end
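Because every library error now inherits from `Concurrent::Error`, callers can rescue the whole family with a single clause. An illustrative sketch (not part of the diff):

    require 'concurrent'

    ivar = Concurrent::IVar.new
    ivar.set(42)

    begin
      ivar.set(99)   # a second assignment violates the IVar's write-once contract
    rescue Concurrent::Error => e
      e.class        #=> Concurrent::MultipleAssignmentError, a subclass of Concurrent::Error
    end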
data/lib/concurrent/executor/cached_thread_pool.rb
CHANGED
@@ -1,46 +1,32 @@
-require 'concurrent/
+require 'concurrent/utility/engine'
+require 'concurrent/executor/thread_pool_executor'
 
 module Concurrent
 
-
-
-
-
-
-  when Concurrent.on_jruby?
-    JavaCachedThreadPool
-  else
-    RubyCachedThreadPool
-  end
-  private_constant :CachedThreadPoolImplementation
-
-  # @!macro [attach] cached_thread_pool
+  # A thread pool that dynamically grows and shrinks to fit the current workload.
+  # New threads are created as needed, existing threads are reused, and threads
+  # that remain idle for too long are killed and removed from the pool. These
+  # pools are particularly suited to applications that perform a high volume of
+  # short-lived tasks.
   #
-  #
-  #
-  #
-  #
-  #
+  # On creation a `CachedThreadPool` has zero running threads. New threads are
+  # created on the pool as new operations are `#post`. The size of the pool
+  # will grow until `#max_length` threads are in the pool or until the number
+  # of threads exceeds the number of running and pending operations. When a new
+  # operation is post to the pool the first available idle thread will be tasked
+  # with the new operation.
   #
-  #
-  #
-  #
-  #
-  # operation is post to the pool the first available idle thread will be tasked
-  # with the new operation.
+  # Should a thread crash for any reason the thread will immediately be removed
+  # from the pool. Similarly, threads which remain idle for an extended period
+  # of time will be killed and reclaimed. Thus these thread pools are very
+  # efficient at reclaiming unused resources.
   #
-  #
-  # from the pool. Similarly, threads which remain idle for an extended period
-  # of time will be killed and reclaimed. Thus these thread pools are very
-  # efficient at reclaiming unused resources.
-  #
-  # The API and behavior of this class are based on Java's `CachedThreadPool`
+  # The API and behavior of this class are based on Java's `CachedThreadPool`
   #
   # @!macro thread_pool_options
-
-  class CachedThreadPool < CachedThreadPoolImplementation
+  class CachedThreadPool < ThreadPoolExecutor
 
-    # @!macro [
+    # @!macro [attach] cached_thread_pool_method_initialize
     #
     # Create a new thread pool.
     #
@@ -50,8 +36,27 @@ module Concurrent
     # @raise [ArgumentError] if `fallback_policy` is not a known policy
     #
     # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newCachedThreadPool--
+    def initialize(opts = {})
+      defaults  = { idletime:    DEFAULT_THREAD_IDLETIMEOUT }
+      overrides = { min_threads: 0,
+                    max_threads: DEFAULT_MAX_POOL_SIZE,
+                    max_queue:   DEFAULT_MAX_QUEUE_SIZE }
+      super(defaults.merge(opts).merge(overrides))
+    end
+
+    private
 
-    # @!
-    #
+    # @!macro cached_thread_pool_method_initialize
+    # @!visibility private
+    def ns_initialize(opts)
+      super(opts)
+      if Concurrent.on_jruby?
+        @max_queue = 0
+        @executor = java.util.concurrent.Executors.newCachedThreadPool
+        @executor.setRejectedExecutionHandler(FALLBACK_POLICY_CLASSES[@fallback_policy].new)
+        @executor.setKeepAliveTime(opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT), java.util.concurrent.TimeUnit::SECONDS)
+        self.auto_terminate = opts.fetch(:auto_terminate, true)
+      end
+    end
   end
 end
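A minimal usage sketch of the unified class (illustrative, not from the diff):

    require 'concurrent'

    pool = Concurrent::CachedThreadPool.new   # starts with zero threads

    10.times do |i|
      pool.post { sleep(0.1); puts "task #{i} done" }   # threads are created or reused on demand
    end

    pool.shutdown                # stop accepting new work
    pool.wait_for_termination    # block until queued tasks have finished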
data/lib/concurrent/executor/fixed_thread_pool.rb
CHANGED
@@ -1,19 +1,8 @@
-require 'concurrent/
+require 'concurrent/utility/engine'
+require 'concurrent/executor/thread_pool_executor'
 
 module Concurrent
 
-  if Concurrent.on_jruby?
-    require 'concurrent/executor/java_fixed_thread_pool'
-  end
-
-  FixedThreadPoolImplementation = case
-                                  when Concurrent.on_jruby?
-                                    JavaFixedThreadPool
-                                  else
-                                    RubyFixedThreadPool
-                                  end
-  private_constant :FixedThreadPoolImplementation
-
   # @!macro [new] thread_pool_executor_constant_default_max_pool_size
   #   Default maximum number of threads that will be created in the pool.
 
@@ -119,35 +108,7 @@ module Concurrent
 
 
 
-
-  # @!macro [new] fixed_thread_pool_method_initialize
-  #
-  #   Create a new thread pool.
-  #
-  #   @param [Integer] num_threads the number of threads to allocate
-  #   @param [Hash] opts the options defining pool behavior.
-  #   @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy
-  #
-  #   @raise [ArgumentError] if `num_threads` is less than or equal to zero
-  #   @raise [ArgumentError] if `fallback_policy` is not a known policy
-  #
-  #   @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newFixedThreadPool-int-
-
-
-
-
-
-  # @!macro [attach] fixed_thread_pool
-  #
-  #   A thread pool with a set number of threads. The number of threads in the pool
-  #   is set on construction and remains constant. When all threads are busy new
-  #   tasks `#post` to the thread pool are enqueued until a thread becomes available.
-  #   Should a thread crash for any reason the thread will immediately be removed
-  #   from the pool and replaced.
-  #
-  #   The API and behavior of this class are based on Java's `FixedThreadPool`
-  #
-  #   @!macro [attach] thread_pool_options
+  # @!macro [new] thread_pool_options
   #
   #   **Thread Pool Options**
   #
@@ -203,11 +164,43 @@ module Concurrent
   #   @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html Java Executors class
   #   @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html Java ExecutorService interface
   #   @see http://ruby-doc.org//core-2.2.0/Kernel.html#method-i-at_exit Kernel#at_exit
-  #
-  # @!macro thread_pool_executor_public_api
-  class FixedThreadPool < FixedThreadPoolImplementation
 
-
-
+
+
+
+
+  # @!macro [attach] fixed_thread_pool
+  #
+  #   A thread pool with a set number of threads. The number of threads in the pool
+  #   is set on construction and remains constant. When all threads are busy new
+  #   tasks `#post` to the thread pool are enqueued until a thread becomes available.
+  #   Should a thread crash for any reason the thread will immediately be removed
+  #   from the pool and replaced.
+  #
+  #   The API and behavior of this class are based on Java's `FixedThreadPool`
+  #
+  # @!macro thread_pool_options
+  class FixedThreadPool < ThreadPoolExecutor
+
+    # @!macro [attach] fixed_thread_pool_method_initialize
+    #
+    #   Create a new thread pool.
+    #
+    #   @param [Integer] num_threads the number of threads to allocate
+    #   @param [Hash] opts the options defining pool behavior.
+    #   @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy
+    #
+    #   @raise [ArgumentError] if `num_threads` is less than or equal to zero
+    #   @raise [ArgumentError] if `fallback_policy` is not a known policy
+    #
+    #   @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newFixedThreadPool-int-
+    def initialize(num_threads, opts = {})
+      raise ArgumentError.new('number of threads must be greater than zero') if num_threads.to_i < 1
+      defaults  = { max_queue: DEFAULT_MAX_QUEUE_SIZE,
+                    idletime:  DEFAULT_THREAD_IDLETIMEOUT }
+      overrides = { min_threads: num_threads,
+                    max_threads: num_threads }
+      super(defaults.merge(opts).merge(overrides))
+    end
   end
 end
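And the fixed-size counterpart (illustrative sketch; `do_work` is a hypothetical stand-in for the real task):

    require 'concurrent'

    pool = Concurrent::FixedThreadPool.new(5)   # exactly five worker threads

    100.times do |i|
      pool.post { do_work(i) }   # do_work is hypothetical; tasks queue until a thread is free
    end

    pool.shutdown
    pool.wait_for_termination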
data/lib/concurrent/ivar.rb
CHANGED
data/lib/concurrent/timer_task.rb
CHANGED
@@ -297,13 +297,14 @@ module Concurrent
 
   # @!visibility private
   def schedule_next_task(interval = execution_interval)
-
+    ScheduledTask.execute(interval, args: [Concurrent::Event.new], &method(:execute_task))
+    nil
   end
 
   # @!visibility private
   def execute_task(completion)
-    return unless @running.true?
-
+    return nil unless @running.true?
+    ScheduledTask.execute(execution_interval, args: [completion], &method(:timeout_task))
     _success, value, reason = @executor.execute(self)
     if completion.try?
       self.value = value
@@ -313,6 +314,7 @@ module Concurrent
         [time, self.value, reason]
       end
     end
+    nil
   end
 
   # @!visibility private
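For context, a minimal `TimerTask` usage sketch (illustrative, not from the diff):

    require 'concurrent'

    # Run the block every two seconds on the global timer set.
    task = Concurrent::TimerTask.new(execution_interval: 2) do
      puts Time.now
    end
    task.execute    # start the timer

    sleep(6)        # let it fire a few times
    task.shutdown   # stop scheduling further runs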
data/lib/concurrent/version.rb
CHANGED
data/lib/concurrent_ruby_ext.jar
CHANGED
Binary file
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: concurrent-ruby
 version: !ruby/object:Gem::Version
-  version: 0.9.0.pre2
+  version: 0.9.0.pre3
 platform: java
 authors:
 - Jerry D'Antonio
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2015-06-
+date: 2015-06-22 00:00:00.000000000 Z
 dependencies: []
 description: |
   Modern concurrency tools including agents, futures, promises, thread pools, actors, supervisors, and more.
@@ -69,12 +69,8 @@ files:
 - lib/concurrent/executor/fixed_thread_pool.rb
 - lib/concurrent/executor/immediate_executor.rb
 - lib/concurrent/executor/indirect_immediate_executor.rb
-- lib/concurrent/executor/java_cached_thread_pool.rb
-- lib/concurrent/executor/java_fixed_thread_pool.rb
 - lib/concurrent/executor/java_single_thread_executor.rb
 - lib/concurrent/executor/java_thread_pool_executor.rb
-- lib/concurrent/executor/ruby_cached_thread_pool.rb
-- lib/concurrent/executor/ruby_fixed_thread_pool.rb
 - lib/concurrent/executor/ruby_single_thread_executor.rb
 - lib/concurrent/executor/ruby_thread_pool_executor.rb
 - lib/concurrent/executor/safe_task_executor.rb
@@ -127,17 +123,17 @@ require_paths:
 - lib
 required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
-  - -
+  - - ">="
   - !ruby/object:Gem::Version
     version: 1.9.3
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - -
+  - - ">"
   - !ruby/object:Gem::Version
     version: 1.3.1
 requirements: []
 rubyforge_project:
-rubygems_version: 2.4.
+rubygems_version: 2.4.8
 signing_key:
 specification_version: 4
 summary: Modern concurrency tools for Ruby. Inspired by Erlang, Clojure, Scala, Haskell, F#, C#, Java, and classic concurrency patterns.
data/lib/concurrent/executor/java_cached_thread_pool.rb
DELETED
@@ -1,34 +0,0 @@
-if Concurrent.on_jruby?
-
-  require 'concurrent/executor/java_thread_pool_executor'
-
-  module Concurrent
-
-    # @!macro cached_thread_pool
-    # @!macro thread_pool_options
-    # @!macro thread_pool_executor_public_api
-    # @!visibility private
-    class JavaCachedThreadPool < JavaThreadPoolExecutor
-
-      # @!macro cached_thread_pool_method_initialize
-      def initialize(opts = {})
-        defaults = { idletime: DEFAULT_THREAD_IDLETIMEOUT }
-        overrides = { min_threads: 0,
-                      max_threads: DEFAULT_MAX_POOL_SIZE,
-                      max_queue: 0 }
-        super(defaults.merge(opts).merge(overrides))
-      end
-
-      protected
-
-      def ns_initialize(opts)
-        super(opts)
-        @max_queue = 0
-        @executor = java.util.concurrent.Executors.newCachedThreadPool
-        @executor.setRejectedExecutionHandler(FALLBACK_POLICY_CLASSES[@fallback_policy].new)
-        @executor.setKeepAliveTime(opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT), java.util.concurrent.TimeUnit::SECONDS)
-        self.auto_terminate = opts.fetch(:auto_terminate, true)
-      end
-    end
-  end
-end
data/lib/concurrent/executor/java_fixed_thread_pool.rb
DELETED
@@ -1,24 +0,0 @@
-if Concurrent.on_jruby?
-
-  require 'concurrent/executor/java_thread_pool_executor'
-
-  module Concurrent
-
-    # @!macro fixed_thread_pool
-    # @!macro thread_pool_options
-    # @!macro thread_pool_executor_public_api
-    # @!visibility private
-    class JavaFixedThreadPool < JavaThreadPoolExecutor
-
-      # @!macro fixed_thread_pool_method_initialize
-      def initialize(num_threads, opts = {})
-        raise ArgumentError.new('number of threads must be greater than zero') if num_threads.to_i < 1
-        defaults = { max_queue: DEFAULT_MAX_QUEUE_SIZE,
-                     idletime: DEFAULT_THREAD_IDLETIMEOUT }
-        overrides = { min_threads: num_threads,
-                      max_threads: num_threads }
-        super(defaults.merge(opts).merge(overrides))
-      end
-    end
-  end
-end
data/lib/concurrent/executor/ruby_cached_thread_pool.rb
DELETED
@@ -1,20 +0,0 @@
-require 'concurrent/executor/ruby_thread_pool_executor'
-
-module Concurrent
-
-  # @!macro cached_thread_pool
-  # @!macro thread_pool_options
-  # @!macro thread_pool_executor_public_api
-  # @!visibility private
-  class RubyCachedThreadPool < RubyThreadPoolExecutor
-
-    # @!macro cached_thread_pool_method_initialize
-    def initialize(opts = {})
-      defaults = { idletime: DEFAULT_THREAD_IDLETIMEOUT }
-      overrides = { min_threads: 0,
-                    max_threads: DEFAULT_MAX_POOL_SIZE,
-                    max_queue: DEFAULT_MAX_QUEUE_SIZE }
-      super(defaults.merge(opts).merge(overrides))
-    end
-  end
-end
data/lib/concurrent/executor/ruby_fixed_thread_pool.rb
DELETED
@@ -1,21 +0,0 @@
-require 'concurrent/executor/ruby_thread_pool_executor'
-
-module Concurrent
-
-  # @!macro fixed_thread_pool
-  # @!macro thread_pool_options
-  # @!macro thread_pool_executor_public_api
-  # @!visibility private
-  class RubyFixedThreadPool < RubyThreadPoolExecutor
-
-    # @!macro fixed_thread_pool_method_initialize
-    def initialize(num_threads, opts = {})
-      raise ArgumentError.new('number of threads must be greater than zero') if num_threads.to_i < 1
-      defaults = { max_queue: DEFAULT_MAX_QUEUE_SIZE,
-                   idletime: DEFAULT_THREAD_IDLETIMEOUT }
-      overrides = { min_threads: num_threads,
-                    max_threads: num_threads }
-      super(defaults.merge(opts).merge(overrides))
-    end
-  end
-end