concurrent-ruby 1.0.3.pre3 → 1.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +14 -0
- data/README.md +1 -1
- data/lib/concurrent.rb +3 -3
- data/lib/concurrent/async.rb +2 -2
- data/lib/concurrent/atom.rb +3 -3
- data/lib/concurrent/atomic/abstract_thread_local_var.rb +29 -3
- data/lib/concurrent/atomic/count_down_latch.rb +23 -0
- data/lib/concurrent/atomic/cyclic_barrier.rb +23 -3
- data/lib/concurrent/atomic/java_thread_local_var.rb +1 -14
- data/lib/concurrent/atomic/mutex_atomic_fixnum.rb +2 -18
- data/lib/concurrent/atomic/mutex_count_down_latch.rb +3 -3
- data/lib/concurrent/atomic/mutex_semaphore.rb +15 -15
- data/lib/concurrent/atomic/ruby_thread_local_var.rb +11 -26
- data/lib/concurrent/atomic/thread_local_var.rb +2 -0
- data/lib/concurrent/concern/obligation.rb +1 -0
- data/lib/concurrent/executor/timer_set.rb +11 -0
- data/lib/concurrent/hash.rb +2 -1
- data/lib/concurrent/map.rb +5 -3
- data/lib/concurrent/promise.rb +10 -6
- data/lib/concurrent/synchronization/condition.rb +1 -0
- data/lib/concurrent/synchronization/lock.rb +1 -0
- data/lib/concurrent/synchronization/truffle_object.rb +1 -2
- data/lib/concurrent/thread_safe/util.rb +2 -0
- data/lib/concurrent/timer_task.rb +3 -3
- data/lib/concurrent/tvar.rb +1 -1
- data/lib/concurrent/utility/engine.rb +3 -3
- data/lib/concurrent/utility/native_integer.rb +53 -0
- data/lib/concurrent/utility/processor_counter.rb +15 -13
- data/lib/concurrent/version.rb +2 -2
- metadata +7 -5
checksums.yaml
CHANGED
```diff
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: faedca437c0657830f5b0cf420bbc9fe8a7f9908
+  data.tar.gz: 26114b1a172fdff0a590694d0c417a9ae3e52b5e
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: ded8d7abaf69a21931f850de692e0b8e34bbd3275ff294a3afe7f3697dd52e3a638a6359f0c5a8ab7b8d60beaecbf9bfc2469868e4a45629c22678975c42f8c8
+  data.tar.gz: 2d9b104c2c7eeb3352aa78dddf6a0c3a1b46752d973409a443cd6b53ed64a948378290a88fbb83a9d292ab12c649deac86a05c1fff5ad51f39169afba9e65673
```

data/CHANGELOG.md
CHANGED
```diff
@@ -1,3 +1,17 @@
+* Trigger execution of flattened delayed futures
+* Avoid forking for processor_count if possible
+* Semaphore Mutex and JRuby parity
+* Adds Map#each as alias to Map#each_pair
+* Fix uninitialized instance variables
+* Make Fixnum, Bignum merger ready
+* Allows Promise#then to receive an executor
+* TimerSet now survives a fork
+* Reject promise on any exception
+* Allow ThreadLocalVar to be initialized with a block
+* Support Alpha with `Concurrent::processor_count`
+* Fixes format-security error when compiling ruby_193_compatible.h
+* Concurrent::Atom#swap fixed: reraise the exceptions from block
+
 ## Current Release v1.0.2 (2 May 2016)
 
 * Fix bug with `Concurrent::Map` MRI backend `#inspect` method
```

data/README.md
CHANGED
```diff
@@ -235,8 +235,8 @@ The best practice is to depend on `concurrent-ruby` and let users to decide if t
 
 ## Maintainers
 
+* [Petr Chalupa](https://github.com/pitr-ch) (lead maintainer)
 * [Jerry D'Antonio](https://github.com/jdantonio) (creator)
-* [Petr Chalupa](https://github.com/pitr-ch)
 * [Michele Della Torre](https://github.com/mighe)
 * [Chris Seaton](https://github.com/chrisseaton)
 * [Paweł Obrok](https://github.com/obrok)
```

data/lib/concurrent.rb
CHANGED
```diff
@@ -65,13 +65,13 @@ require 'concurrent/options'
 # Object references in Ruby are mutable. This can lead to serious
 # problems when the {#value} of an object is a mutable reference. Which
 # is always the case unless the value is a `Fixnum`, `Symbol`, or similar
-# "
+# "primitive" data type. Each instance can be configured with a few
 # options that can help protect the program from potentially dangerous
-# operations. Each of these options can be optionally set when the
+# operations. Each of these options can be optionally set when the object
 # instance is created:
 #
 # * `:dup_on_deref` When true the object will call the `#dup` method on
-#   the `value` object every time the `#value`
+#   the `value` object every time the `#value` method is called
 #   (default: false)
 # * `:freeze_on_deref` When true the object will call the `#freeze`
 #   method on the `value` object every time the `#value` method is called
```

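The `:dup_on_deref` / `:freeze_on_deref` options described above are consumed by the gem's dereferenceable objects (`Future`, `Agent`, `TimerTask`, ...). A minimal sketch of `:freeze_on_deref`, assuming the documented option name and the `Future` API:

```ruby
require 'concurrent'

# Every call to #value freezes the dereferenced value object.
future = Concurrent::Future.execute(freeze_on_deref: true) { [1, 2, 3] }

future.value          # => [1, 2, 3] (blocks until the block has run)
future.value.frozen?  # => true
```
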
data/lib/concurrent/async.rb
CHANGED
```diff
@@ -34,7 +34,7 @@ module Concurrent
   # When an Erlang module implements the `gen_server` behavior it becomes
   # inherently asynchronous. The `start` or `start_link` function spawns a
   # process (similar to a thread but much more lightweight and efficient) and
-  #
+  # returns the ID of the process. Using the process ID, other processes can
   # send messages to the `gen_server` via the `cast` and `call` methods. Unlike
   # Erlang's `gen_server`, however, `Async` classes do not support linking or
   # supervision trees.
@@ -142,7 +142,7 @@ module Concurrent
   # practice is to read the instance variable into a local variable at the start
   # of the method then update the instance variable at the *end* of the method.
   # This way, should an exception be raised during method execution the internal
-  # state of the
+  # state of the object will not have been changed.
   #
   # ### Reader Attributes
   #
```

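For context, the `gen_server`-style usage the corrected comments describe, as documented for `Concurrent::Async` (a small sketch; `async` returns immediately, `await` blocks):

```ruby
require 'concurrent'

class Echo
  include Concurrent::Async

  def echo(msg)
    "echo: #{msg}"
  end
end

horn = Echo.new
horn.async.echo('one')         # non-blocking; returns an IVar right away
horn.await.echo('two').value   # blocking; => "echo: two"
```
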
data/lib/concurrent/atom.rb
CHANGED
```diff
@@ -19,7 +19,7 @@ require 'concurrent/synchronization'
 #   the value will undergo frequent reads but only occasional, though complex,
 #   updates. Suitable when the result of an update must be known immediately.
 # * *{Concurrent::AtomicReference}:* A simple object reference that can be
-#   atomically. Updates are synchronous but fast.
+#   atomically. Updates are synchronous but fast. Best used when updates a
 #   simple set operations. Not suitable when updates are complex.
 #   {Concurrent::AtomicBoolean} and {Concurrent::AtomicFixnum} are similar
 #   but optimized for the given data type.
@@ -61,7 +61,7 @@ module Concurrent
   #     set + [set[-2..-1].reduce{|sum,x| sum + x }]
   #   end
   #
-  #   # create an atom with
+  #   # create an atom with an initial value
   #   atom = Concurrent::Atom.new(next_fibonacci)
   #
   #   # send a few update requests
@@ -144,7 +144,7 @@ module Concurrent
     # @param [Object] args Zero or more arguments passed to the block.
     #
     # @yield [value, args] Calculates a new value for the atom based on the
-    #   current value and any supplied
+    #   current value and any supplied arguments.
     # @yieldparam value [Object] The current value of the atom.
     # @yieldparam args [Object] All arguments passed to the function, in order.
     # @yieldreturn [Object] The intended new value of the atom.
```

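Related to the changelog entry "Concurrent::Atom#swap fixed: reraise the exceptions from block", a short sketch of the documented `#swap` contract (a failing swap leaves the value untouched):

```ruby
require 'concurrent'

atom = Concurrent::Atom.new(0)
atom.swap { |current| current + 1 }   # the block result, 1, becomes the new value

begin
  atom.swap { |_current| raise 'boom' }
rescue RuntimeError
  # as of 1.0.3 the exception raised in the block is re-raised to the caller
end

atom.value  # => 1, unchanged by the failed swap
```
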
data/lib/concurrent/atomic/abstract_thread_local_var.rb
CHANGED
```diff
@@ -8,8 +8,17 @@ module Concurrent
   class AbstractThreadLocalVar
 
     # @!macro thread_local_var_method_initialize
-    def initialize(default = nil)
-
+    def initialize(default = nil, &default_block)
+      if default && block_given?
+        raise ArgumentError, "Cannot use both value and block as default value"
+      end
+
+      if block_given?
+        @default_block = default_block
+      else
+        @default = default
+      end
+
       allocate_storage
     end
 
@@ -25,7 +34,15 @@ module Concurrent
 
     # @!macro thread_local_var_method_bind
     def bind(value, &block)
-
+      if block_given?
+        old_value = self.value
+        begin
+          self.value = value
+          yield
+        ensure
+          self.value = old_value
+        end
+      end
     end
 
     protected
@@ -34,5 +51,14 @@ module Concurrent
     def allocate_storage
       raise NotImplementedError
     end
+
+    # @!visibility private
+    def default
+      if @default_block
+        self.value = @default_block.call
+      else
+        @default
+      end
+    end
   end
 end
```

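A minimal sketch of the two behaviours added above, block defaults and the shared `#bind`, using the public `Concurrent::ThreadLocalVar` wrapper (thread scheduling makes ordering nondeterministic):

```ruby
require 'concurrent'

# The block default is evaluated lazily, once per thread (new in 1.0.3).
local = Concurrent::ThreadLocalVar.new { [] }

threads = 2.times.map do |i|
  Thread.new do
    local.value << i          # each thread mutates only its own array
    local.bind(:temporary) do
      local.value             # => :temporary while the bind block runs
    end
    local.value               # => [i] again; #bind restored the previous value
  end
end
threads.each(&:join)

local.value  # => [] in the main thread, freshly built by the default block
```
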
data/lib/concurrent/atomic/count_down_latch.rb
CHANGED
```diff
@@ -72,6 +72,29 @@
   # with its work. A `CountDownLatch` can be used only once. Its value cannot be reset.
   #
   # @!macro count_down_latch_public_api
+  # @example Waiter and Decrementer
+  #   latch = Concurrent::CountDownLatch.new(3)
+  #
+  #   waiter = Thread.new do
+  #     latch.wait()
+  #     puts ("Waiter released")
+  #   end
+  #
+  #   decrementer = Thread.new do
+  #     sleep(1)
+  #     latch.count_down
+  #     puts latch.count
+  #
+  #     sleep(1)
+  #     latch.count_down
+  #     puts latch.count
+  #
+  #     sleep(1)
+  #     latch.count_down
+  #     puts latch.count
+  #   end
+  #
+  #   [waiter, decrementer].each(&:join)
   class CountDownLatch < CountDownLatchImplementation
   end
 end
```

data/lib/concurrent/atomic/cyclic_barrier.rb
CHANGED
```diff
@@ -1,9 +1,29 @@
 require 'concurrent/synchronization'
+require 'concurrent/utility/native_integer'
 
 module Concurrent
 
   # A synchronization aid that allows a set of threads to all wait for each
   # other to reach a common barrier point.
+  # @example
+  #   barrier = Concurrent::CyclicBarrier.new(3)
+  #   jobs = Array.new(3) { |i| -> { sleep i; p done: i } }
+  #   process = -> (i) do
+  #     # waiting to start at the same time
+  #     barrier.wait
+  #     # execute job
+  #     jobs[i].call
+  #     # wait for others to finish
+  #     barrier.wait
+  #   end
+  #   threads = 2.times.map do |i|
+  #     Thread.new(i, &process)
+  #   end
+  #
+  #   # use main as well
+  #   process.call 2
+  #
+  #   # here we can be sure that all jobs are processed
   class CyclicBarrier < Synchronization::LockableObject
 
     # @!visibility private
@@ -18,9 +38,9 @@ module Concurrent
     #
     # @raise [ArgumentError] if `parties` is not an integer or is less than zero
     def initialize(parties, &block)
-
-
-
+      Utility::NativeInteger.ensure_integer_and_bounds parties
+      Utility::NativeInteger.ensure_positive_and_no_zero parties
+
       super(&nil)
       synchronize { ns_initialize parties, &block }
     end
```

data/lib/concurrent/atomic/java_thread_local_var.rb
CHANGED
```diff
@@ -13,7 +13,7 @@ if Concurrent.on_jruby?
         value = @var.get
 
         if value.nil?
-
+          default
         elsif value == NULL
           nil
         else
@@ -26,19 +26,6 @@ if Concurrent.on_jruby?
         @var.set(value)
       end
 
-      # @!macro thread_local_var_method_bind
-      def bind(value, &block)
-        if block_given?
-          old_value = @var.get
-          begin
-            @var.set(value)
-            yield
-          ensure
-            @var.set(old_value)
-          end
-        end
-      end
-
       protected
 
       # @!visibility private
```

data/lib/concurrent/atomic/mutex_atomic_fixnum.rb
CHANGED
```diff
@@ -1,4 +1,5 @@
 require 'concurrent/synchronization'
+require 'concurrent/utility/native_integer'
 
 module Concurrent
 
@@ -7,10 +8,6 @@ module Concurrent
   # @!macro internal_implementation_note
   class MutexAtomicFixnum < Synchronization::LockableObject
 
-    # http://stackoverflow.com/questions/535721/ruby-max-integer
-    MIN_VALUE = -(2**(0.size * 8 - 2))
-    MAX_VALUE = (2**(0.size * 8 - 2) - 1)
-
     # @!macro atomic_fixnum_method_initialize
     def initialize(initial = 0)
       super()
@@ -71,21 +68,8 @@ module Concurrent
 
     # @!visibility private
     def ns_set(value)
-
+      Utility::NativeInteger.ensure_integer_and_bounds value
       @value = value
     end
-
-    # @!visibility private
-    def range_check!(value)
-      if !value.is_a?(Fixnum)
-        raise ArgumentError.new('value value must be a Fixnum')
-      elsif value > MAX_VALUE
-        raise RangeError.new("#{value} is greater than the maximum value of #{MAX_VALUE}")
-      elsif value < MIN_VALUE
-        raise RangeError.new("#{value} is less than the maximum value of #{MIN_VALUE}")
-      else
-        value
-      end
-    end
   end
 end
```

data/lib/concurrent/atomic/mutex_count_down_latch.rb
CHANGED
```diff
@@ -9,9 +9,9 @@ module Concurrent
 
     # @!macro count_down_latch_method_initialize
     def initialize(count = 1)
-
-
-
+      Utility::NativeInteger.ensure_integer_and_bounds count
+      Utility::NativeInteger.ensure_positive count
+
       super()
       synchronize { ns_initialize count }
     end
```

data/lib/concurrent/atomic/mutex_semaphore.rb
CHANGED
```diff
@@ -1,4 +1,5 @@
 require 'concurrent/synchronization'
+require 'concurrent/utility/native_integer'
 
 module Concurrent
 
@@ -9,18 +10,17 @@ module Concurrent
 
     # @!macro semaphore_method_initialize
     def initialize(count)
-
-
-      end
+      Utility::NativeInteger.ensure_integer_and_bounds count
+
       super()
       synchronize { ns_initialize count }
     end
 
     # @!macro semaphore_method_acquire
     def acquire(permits = 1)
-
-
-
+      Utility::NativeInteger.ensure_integer_and_bounds permits
+      Utility::NativeInteger.ensure_positive permits
+
       synchronize do
         try_acquire_timed(permits, nil)
         nil
@@ -45,9 +45,9 @@ module Concurrent
 
     # @!macro semaphore_method_try_acquire
     def try_acquire(permits = 1, timeout = nil)
-
-
-
+      Utility::NativeInteger.ensure_integer_and_bounds permits
+      Utility::NativeInteger.ensure_positive permits
+
       synchronize do
         if timeout.nil?
           try_acquire_now(permits)
@@ -59,9 +59,9 @@ module Concurrent
 
     # @!macro semaphore_method_release
     def release(permits = 1)
-
-
-
+      Utility::NativeInteger.ensure_integer_and_bounds permits
+      Utility::NativeInteger.ensure_positive permits
+
       synchronize do
         @free += permits
         permits.times { ns_signal }
@@ -81,9 +81,9 @@ module Concurrent
     #
     # @!visibility private
     def reduce_permits(reduction)
-
-
-
+      Utility::NativeInteger.ensure_integer_and_bounds reduction
+      Utility::NativeInteger.ensure_positive reduction
+
       synchronize { @free -= reduction }
       nil
     end
```

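The `NativeInteger` checks above make invalid permit counts fail fast; the public `Semaphore` behaviour is otherwise unchanged. A short sketch, assuming the mutex implementation shown here:

```ruby
require 'concurrent'

semaphore = Concurrent::Semaphore.new(2)

workers = 4.times.map do
  Thread.new do
    semaphore.acquire
    begin
      sleep 0.1               # at most two workers run this section at once
    ensure
      semaphore.release
    end
  end
end
workers.each(&:join)

semaphore.try_acquire(1, 0.5)  # => true or false within roughly half a second
# semaphore.acquire(0.5)       # raises ArgumentError: permits must be an Integer
```
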
data/lib/concurrent/atomic/ruby_thread_local_var.rb
CHANGED
```diff
@@ -35,29 +35,19 @@ module Concurrent
     @@next = 0
     private_constant :FREE, :LOCK, :ARRAYS
 
-    # @!macro [attach] thread_local_var_method_initialize
-    #
-    #   Creates a thread local variable.
-    #
-    #   @param [Object] default the default value when otherwise unset
-    def initialize(default = nil)
-      @default = default
-      allocate_storage
-    end
-
     # @!macro thread_local_var_method_get
     def value
       if array = get_threadlocal_array
         value = array[@index]
         if value.nil?
-
+          default
         elsif value.equal?(NULL)
           nil
         else
           value
         end
       else
-
+        default
       end
     end
 
@@ -76,19 +66,6 @@ module Concurrent
       value
     end
 
-    # @!macro thread_local_var_method_bind
-    def bind(value, &block)
-      if block_given?
-        old_value = self.value
-        begin
-          self.value = value
-          yield
-        ensure
-          self.value = old_value
-        end
-      end
-    end
-
     protected
 
     # @!visibility private
@@ -158,12 +135,20 @@ module Concurrent
       if array = get_threadlocal_array(thread)
         value = array[@index]
         if value.nil?
-
+          default_for(thread)
         elsif value.equal?(NULL)
           nil
         else
           value
         end
+      else
+        default_for(thread)
+      end
+    end
+
+    def default_for(thread)
+      if @default_block
+        raise "Cannot use default_for with default block"
       else
         @default
       end
```

data/lib/concurrent/atomic/thread_local_var.rb
CHANGED
```diff
@@ -11,6 +11,8 @@ module Concurrent
   #   Creates a thread local variable.
   #
   #   @param [Object] default the default value when otherwise unset
+  #   @param [Proc] block Optional block that gets called to obtain the
+  #     default value for each thread
 
   # @!macro [new] thread_local_var_method_get
   #
```

data/lib/concurrent/executor/timer_set.rb
CHANGED
```diff
@@ -78,6 +78,7 @@
       @task_executor = Options.executor_from_options(opts) || Concurrent.global_io_executor
       @timer_executor = SingleThreadExecutor.new
       @condition = Event.new
+      @ruby_pid = $$ # detects if Ruby has forked
       self.auto_terminate = opts.fetch(:auto_terminate, true)
     end
 
@@ -95,6 +96,7 @@
     # @!visibility private
     def ns_post_task(task)
       return false unless ns_running?
+      ns_reset_if_forked
       if (task.initial_delay) <= 0.01
         task.executor.post{ task.process_task }
       else
@@ -121,11 +123,20 @@
     #
     # @!visibility private
     def ns_shutdown_execution
+      ns_reset_if_forked
       @queue.clear
       @timer_executor.kill
       stopped_event.set
     end
 
+    def ns_reset_if_forked
+      if $$ != @ruby_pid
+        @queue.clear
+        @condition.reset
+        @ruby_pid = $$
+      end
+    end
+
     # Run a loop and execute tasks in the scheduled order and at the approximate
     # scheduled time. If no tasks remain the thread will exit gracefully so that
     # garbage collection can occur. If there are no ready tasks it will sleep
```

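The new `ns_reset_if_forked` hook is simply "cache `$$`, drop state when it changes". A generic sketch of the same idiom outside the gem (the `ForkAwareState` class below is hypothetical, not part of concurrent-ruby):

```ruby
# Hypothetical illustration of the PID-check idiom used by TimerSet above.
class ForkAwareState
  def initialize
    @queue    = []
    @ruby_pid = $$          # remember which process created this object
  end

  def push(item)
    reset_if_forked
    @queue << item
  end

  private

  # A forked child inherits @queue but sees a different $$, so state
  # inherited from the parent is discarded, mirroring ns_reset_if_forked.
  def reset_if_forked
    return if $$ == @ruby_pid
    @queue.clear
    @ruby_pid = $$
  end
end
```
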
data/lib/concurrent/hash.rb
CHANGED
```diff
@@ -8,7 +8,8 @@
   #
   # A thread-safe subclass of Hash. This version locks against the object
   # itself for every method call, ensuring only one thread can be reading
-  # or writing at a time. This includes iteration methods like `#each
+  # or writing at a time. This includes iteration methods like `#each`,
+  # which takes the lock repeatedly when reading an item.
   #
   # @see http://ruby-doc.org/core-2.2.0/Hash.html Ruby standard library `Hash`
   class Hash < ::Hash;
```

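Typical use of `Concurrent::Hash`, for reference; the per-call locking described in the corrected comment applies to the synchronized backends (e.g. on JRuby), while the usage is the same everywhere:

```ruby
require 'concurrent'

hash = Concurrent::Hash.new

threads = 4.times.map do |i|
  Thread.new { 100.times { |n| hash["#{i}-#{n}"] = n } }
end
threads.each(&:join)

hash.size  # => 400; all writes from the four threads are present
```
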
data/lib/concurrent/map.rb
CHANGED
```diff
@@ -171,6 +171,8 @@
       each_pair {|k, v| yield v}
     end unless method_defined?(:each_value)
 
+    alias_method :each, :each_pair unless method_defined?(:each)
+
     def key(value)
       each_pair {|k, v| return k if v == value}
       nil
@@ -203,7 +205,7 @@
     undef :freeze
 
     # @!visibility private
-    DEFAULT_OBJ_ID_STR_WIDTH =
+    DEFAULT_OBJ_ID_STR_WIDTH = 0.size == 4 ? 7 : 14 # we want to look "native", 7 for 32-bit, 14 for 64-bit
     # override default #inspect() method: firstly, we don't want to be spilling our guts (i-vars), secondly, MRI backend's
     # #inspect() call on its @backend i-var will bump @backend's iter level while possibly yielding GVL
     def inspect
@@ -227,8 +229,8 @@
     end
 
     def validate_options_hash!(options)
-      if (initial_capacity = options[:initial_capacity]) && (!initial_capacity.kind_of?(
-        raise ArgumentError, ":initial_capacity must be a positive
+      if (initial_capacity = options[:initial_capacity]) && (!initial_capacity.kind_of?(Integer) || initial_capacity < 0)
+        raise ArgumentError, ":initial_capacity must be a positive Integer"
       end
       if (load_factor = options[:load_factor]) && (!load_factor.kind_of?(Numeric) || load_factor <= 0 || load_factor > 1)
         raise ArgumentError, ":load_factor must be a number between 0 and 1"
```

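`Map#each` is now simply an alias of `#each_pair`; a minimal usage sketch:

```ruby
require 'concurrent'

map = Concurrent::Map.new(initial_capacity: 8)
map[:a] = 1
map[:b] = 2

map.each_pair { |key, value| puts "#{key}=#{value}" }
map.each      { |key, value| puts "#{key}=#{value}" }  # same behaviour as of 1.0.3
```
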
data/lib/concurrent/promise.rb
CHANGED
```diff
@@ -2,6 +2,7 @@ require 'thread'
 require 'concurrent/constants'
 require 'concurrent/errors'
 require 'concurrent/ivar'
+require 'concurrent/executor/safe_task_executor'
 
 require 'concurrent/options'
 
@@ -78,7 +79,7 @@
   #   ```
   #
   # Promises can be chained using the `then` method. The `then` method accepts a
-  # block, to be executed on fulfillment, and a callable argument to be executed
+  # block and an executor, to be executed on fulfillment, and a callable argument to be executed
   # on rejection. The result of the each promise is passed as the block argument
   # to chained promises.
   #
@@ -92,7 +93,7 @@
   #   p = Concurrent::Promise.fulfill(20).
   #       then{|result| result - 10 }.
   #       then{|result| result * 3 }.
-  #       then{|result| result % 5 }.execute
+  #       then(executor: different_executor){|result| result % 5 }.execute
   #   ```
   #
   # The initial state of a newly created Promise depends on the state of its parent:
@@ -102,7 +103,7 @@
   # - if parent is *rejected* the child will be *pending* (but will ultimately be *rejected*)
   #
   # Promises are executed asynchronously from the main thread. By the time a
-  # child Promise finishes
+  # child Promise finishes intialization it may be in a different state than its
   # parent (by the time a child is created its parent may have completed
   # execution and changed state). Despite being asynchronous, however, the order
   # of execution of Promise objects in a chain (or tree) is strictly defined.
@@ -300,15 +301,18 @@
     # @param [Proc] rescuer An optional rescue block to be executed if the
     #   promise is rejected.
     #
+    # @param [ThreadPool] executor An optional thread pool executor to be used
+    #   in the new Promise
+    #
     # @yield The block operation to be performed asynchronously.
     #
     # @return [Promise] the new promise
-    def then(rescuer = nil, &block)
+    def then(rescuer = nil, executor = @executor, &block)
       raise ArgumentError.new('rescuers and block are both missing') if rescuer.nil? && !block_given?
       block = Proc.new { |result| result } unless block_given?
       child = Promise.new(
         parent: self,
-        executor:
+        executor: executor,
         on_fulfill: block,
         on_reject: rescuer
       )
@@ -524,7 +528,7 @@
     # @!visibility private
     def realize(task)
       @executor.post do
-        success, value, reason = SafeTaskExecutor.new(task).execute(*@args)
+        success, value, reason = SafeTaskExecutor.new(task, rescue_exception: true).execute(*@args)
         complete(success, value, reason)
       end
     end
```

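With the signature shown above, `then(rescuer = nil, executor = @executor, &block)`, an executor can be supplied positionally for a single step of a chain. A small sketch (pool size and arithmetic are arbitrary):

```ruby
require 'concurrent'

pool = Concurrent::FixedThreadPool.new(2)

promise = Concurrent::Promise.fulfill(20).
            then { |result| result - 10 }.
            then(nil, pool) { |result| result * 3 }.   # rescuer, executor
            execute

promise.value   # => 30; blocks until the chain completes
pool.shutdown
```
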
data/lib/concurrent/thread_safe/util.rb
CHANGED
```diff
@@ -6,8 +6,10 @@
   # @!visibility private
   module Util
 
+    # TODO (pitr-ch 15-Oct-2016): migrate to Utility::NativeInteger
     FIXNUM_BIT_SIZE = (0.size * 8) - 2
     MAX_INT = (2 ** FIXNUM_BIT_SIZE) - 1
+    # TODO (pitr-ch 15-Oct-2016): migrate to Utility::ProcessorCounter
     CPU_COUNT = 16 # is there a way to determine this?
   end
 end
```

data/lib/concurrent/timer_task.rb
CHANGED
```diff
@@ -9,7 +9,7 @@ require 'concurrent/scheduled_task'
 
 module Concurrent
 
-  # A very common
+  # A very common concurrency pattern is to run a thread that performs a task at
   # regular intervals. The thread that performs the task sleeps for the given
   # interval then wakes up and performs the task. Lather, rinse, repeat... This
   # pattern causes two problems. First, it is difficult to test the business
@@ -23,7 +23,7 @@
   # execution interval. The `TimerTask` thread does not perform the task,
   # however. Instead, the TimerTask launches the task on a separate thread.
   # Should the task experience an unrecoverable crash only the task thread will
-  # crash. This makes the `TimerTask` very fault tolerant Additionally, the
+  # crash. This makes the `TimerTask` very fault tolerant. Additionally, the
   # `TimerTask` thread can respond to the success or failure of the task,
   # performing logging or ancillary operations. `TimerTask` can also be
   # configured with a timeout value allowing it to kill a task that runs too
@@ -41,7 +41,7 @@
   #
   # The `TimerTask` class includes the `Dereferenceable` mixin module so the
   # result of the last execution is always available via the `#value` method.
-  #
+  # Dereferencing options can be passed to the `TimerTask` during construction or
   # at any later time using the `#set_deref_options` method.
   #
   # `TimerTask` supports notification through the Ruby standard library
```

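A compact `TimerTask` sketch matching the corrected description (the interval and sleep values are arbitrary; the latest result comes back through the `Dereferenceable` `#value`):

```ruby
require 'concurrent'

task = Concurrent::TimerTask.new(execution_interval: 2) { Time.now }
task.execute

sleep 3
task.value      # the result of the most recent run
task.shutdown
```
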
data/lib/concurrent/tvar.rb
CHANGED
```diff
@@ -151,7 +151,7 @@ module Concurrent
     raise Transaction::AbortError.new
   end
 
-  # Leave a transaction without
+  # Leave a transaction without committing or aborting - see `Concurrent::atomically`.
   def leave_transaction
     raise Transaction::LeaveError.new
   end
```

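For context, `Concurrent::leave_transaction` is meant to be called from inside `Concurrent::atomically`; a brief sketch (the commented-out call is purely illustrative):

```ruby
require 'concurrent'

balance = Concurrent::TVar.new(100)

Concurrent::atomically do
  balance.value -= 10
  # Concurrent::leave_transaction  # would exit the block here without committing
end

balance.value  # => 90
```
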
data/lib/concurrent/utility/engine.rb
CHANGED
```diff
@@ -8,7 +8,7 @@
     end
 
     def on_jruby_9000?
-      on_jruby? &&
+      on_jruby? && ruby_version(:>=, 9, 0, 0, JRUBY_VERSION)
     end
 
     def on_cruby?
@@ -39,8 +39,8 @@
       defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby'
     end
 
-    def ruby_version(comparison, major, minor
-      result = (
+    def ruby_version(comparison, major, minor, patch, version = RUBY_VERSION)
+      result = (version.split('.').map(&:to_i) <=> [major, minor, patch])
       comparisons = { :== => [0],
                       :>= => [1, 0],
                       :<= => [-1, 0],
```

data/lib/concurrent/utility/native_integer.rb
ADDED
```diff
@@ -0,0 +1,53 @@
+module Concurrent
+  module Utility
+    # @private
+    module NativeInteger
+      # http://stackoverflow.com/questions/535721/ruby-max-integer
+      MIN_VALUE = -(2**(0.size * 8 - 2))
+      MAX_VALUE = (2**(0.size * 8 - 2) - 1)
+
+      def ensure_upper_bound(value)
+        if value > MAX_VALUE
+          raise RangeError.new("#{value} is greater than the maximum value of #{MAX_VALUE}")
+        end
+        value
+      end
+
+      def ensure_lower_bound(value)
+        if value < MIN_VALUE
+          raise RangeError.new("#{value} is less than the maximum value of #{MIN_VALUE}")
+        end
+        value
+      end
+
+      def ensure_integer(value)
+        unless value.is_a?(Integer)
+          raise ArgumentError.new("#{value} is not an Integer")
+        end
+        value
+      end
+
+      def ensure_integer_and_bounds(value)
+        ensure_integer value
+        ensure_upper_bound value
+        ensure_lower_bound value
+      end
+
+      def ensure_positive(value)
+        if value < 0
+          raise ArgumentError.new("#{value} cannot be negative")
+        end
+        value
+      end
+
+      def ensure_positive_and_no_zero(value)
+        if value < 1
+          raise ArgumentError.new("#{value} cannot be negative or zero")
+        end
+        value
+      end
+
+      extend self
+    end
+  end
+end
```

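`NativeInteger` is marked `@private`, but seeing it in use clarifies the `ensure_*` calls added to the atomics above (constants shown for a 64-bit build):

```ruby
require 'concurrent'
require 'concurrent/utility/native_integer'

Concurrent::Utility::NativeInteger::MAX_VALUE                        # => 2**62 - 1
Concurrent::Utility::NativeInteger.ensure_integer_and_bounds(42)     # => 42
Concurrent::Utility::NativeInteger.ensure_positive(0)                # => 0

# Concurrent::Utility::NativeInteger.ensure_positive_and_no_zero(0)   # ArgumentError
# Concurrent::Utility::NativeInteger.ensure_integer_and_bounds(2**64) # RangeError
```
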
data/lib/concurrent/utility/processor_counter.rb
CHANGED
```diff
@@ -28,6 +28,7 @@
   #   processor", which taked into account hyperthreading.
   #
   #   * AIX: /usr/sbin/pmcycles (AIX 5+), /usr/sbin/lsdev
+  #   * Alpha: /usr/bin/nproc (/proc/cpuinfo exists but cannot be used)
   #   * BSD: /sbin/sysctl
   #   * Cygwin: /proc/cpuinfo
   #   * Darwin: /usr/bin/hwprefs, /usr/sbin/sysctl
@@ -84,27 +85,28 @@
           result = WIN32OLE.connect("winmgmts://").ExecQuery(
             "select NumberOfLogicalProcessors from Win32_Processor")
           result.to_enum.collect(&:NumberOfLogicalProcessors).reduce(:+)
-        elsif File.readable?("/proc/cpuinfo")
-
+        elsif File.readable?("/proc/cpuinfo") && (cpuinfo_count = IO.read("/proc/cpuinfo").scan(/^processor/).size) > 0
+          cpuinfo_count
+        elsif File.executable?("/usr/bin/nproc")
+          IO.popen("/usr/bin/nproc --all", &:read).to_i
         elsif File.executable?("/usr/bin/hwprefs")
-          IO.popen("/usr/bin/hwprefs thread_count").
+          IO.popen("/usr/bin/hwprefs thread_count", &:read).to_i
         elsif File.executable?("/usr/sbin/psrinfo")
-          IO.popen("/usr/sbin/psrinfo").
+          IO.popen("/usr/sbin/psrinfo", &:read).scan(/^.*on-*line/).size
         elsif File.executable?("/usr/sbin/ioscan")
-          IO.popen("/usr/sbin/ioscan -kC processor")
-            out.read.scan(/^.*processor/).size
-          end
+          IO.popen("/usr/sbin/ioscan -kC processor", &:read).scan(/^.*processor/).size
         elsif File.executable?("/usr/sbin/pmcycles")
-          IO.popen("/usr/sbin/pmcycles -m").
+          IO.popen("/usr/sbin/pmcycles -m", &:read).count("\n")
        elsif File.executable?("/usr/sbin/lsdev")
-          IO.popen("/usr/sbin/lsdev -Cc processor -S 1").
+          IO.popen("/usr/sbin/lsdev -Cc processor -S 1", &:read).count("\n")
         elsif File.executable?("/usr/sbin/sysconf") and os_name =~ /irix/i
-          IO.popen("/usr/sbin/sysconf NPROC_ONLN").
+          IO.popen("/usr/sbin/sysconf NPROC_ONLN", &:read).to_i
         elsif File.executable?("/usr/sbin/sysctl")
-          IO.popen("/usr/sbin/sysctl -n hw.ncpu").
+          IO.popen("/usr/sbin/sysctl -n hw.ncpu", &:read).to_i
         elsif File.executable?("/sbin/sysctl")
-          IO.popen("/sbin/sysctl -n hw.ncpu").
+          IO.popen("/sbin/sysctl -n hw.ncpu", &:read).to_i
         else
+          # TODO (pitr-ch 05-Nov-2016): warn about failures
           1
         end
       end
@@ -115,7 +117,7 @@
     def compute_physical_processor_count
       ppc = case RbConfig::CONFIG["target_os"]
             when /darwin1/
-              IO.popen("/usr/sbin/sysctl -n hw.physicalcpu").
+              IO.popen("/usr/sbin/sysctl -n hw.physicalcpu", &:read).to_i
             when /linux/
               cores = {} # unique physical ID / core ID combinations
               phy = 0
```

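The additional probes only change how the counts are computed; the public API stays the same (results are machine dependent):

```ruby
require 'concurrent'

Concurrent.processor_count           # logical processors, e.g. 8
Concurrent.physical_processor_count  # physical cores, e.g. 4
```
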
data/lib/concurrent/version.rb
CHANGED
metadata
CHANGED
```diff
@@ -1,15 +1,16 @@
 --- !ruby/object:Gem::Specification
 name: concurrent-ruby
 version: !ruby/object:Gem::Version
-  version: 1.0.3.pre3
+  version: 1.0.3
 platform: ruby
 authors:
 - Jerry D'Antonio
+- Petr Chalupa
 - The Ruby Concurrency Team
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2016-
+date: 2016-12-17 00:00:00.000000000 Z
 dependencies: []
 description: |
   Modern concurrency tools including agents, futures, promises, thread pools, actors, supervisors, and more.
@@ -147,6 +148,7 @@ files:
 - lib/concurrent/utility/engine.rb
 - lib/concurrent/utility/monotonic_time.rb
 - lib/concurrent/utility/native_extension_loader.rb
+- lib/concurrent/utility/native_integer.rb
 - lib/concurrent/utility/processor_counter.rb
 - lib/concurrent/version.rb
 homepage: http://www.concurrent-ruby.com
@@ -164,12 +166,12 @@ required_ruby_version: !ruby/object:Gem::Requirement
     version: 1.9.3
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - - "
+  - - ">="
   - !ruby/object:Gem::Version
-    version:
+    version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.
+rubygems_version: 2.5.1
 signing_key:
 specification_version: 4
 summary: Modern concurrency tools for Ruby. Inspired by Erlang, Clojure, Scala, Haskell,
```