concurrent-ruby 0.9.2-java → 1.0.0-java
- checksums.yaml +4 -4
- data/CHANGELOG.md +49 -1
- data/README.md +86 -120
- data/lib/concurrent.rb +14 -5
- data/lib/concurrent/agent.rb +587 -0
- data/lib/concurrent/array.rb +39 -0
- data/lib/concurrent/async.rb +296 -149
- data/lib/concurrent/atom.rb +135 -45
- data/lib/concurrent/atomic/abstract_thread_local_var.rb +38 -0
- data/lib/concurrent/atomic/atomic_boolean.rb +83 -118
- data/lib/concurrent/atomic/atomic_fixnum.rb +101 -163
- data/lib/concurrent/atomic/atomic_reference.rb +1 -8
- data/lib/concurrent/atomic/count_down_latch.rb +62 -103
- data/lib/concurrent/atomic/cyclic_barrier.rb +3 -1
- data/lib/concurrent/atomic/event.rb +1 -1
- data/lib/concurrent/atomic/java_count_down_latch.rb +39 -0
- data/lib/concurrent/atomic/java_thread_local_var.rb +50 -0
- data/lib/concurrent/atomic/mutex_atomic_boolean.rb +60 -0
- data/lib/concurrent/atomic/mutex_atomic_fixnum.rb +91 -0
- data/lib/concurrent/atomic/mutex_count_down_latch.rb +43 -0
- data/lib/concurrent/atomic/mutex_semaphore.rb +115 -0
- data/lib/concurrent/atomic/read_write_lock.rb +5 -4
- data/lib/concurrent/atomic/reentrant_read_write_lock.rb +3 -1
- data/lib/concurrent/atomic/ruby_thread_local_var.rb +172 -0
- data/lib/concurrent/atomic/semaphore.rb +84 -178
- data/lib/concurrent/atomic/thread_local_var.rb +65 -294
- data/lib/concurrent/atomic_reference/jruby+truffle.rb +1 -0
- data/lib/concurrent/atomic_reference/jruby.rb +1 -1
- data/lib/concurrent/atomic_reference/mutex_atomic.rb +14 -8
- data/lib/concurrent/atomic_reference/ruby.rb +1 -1
- data/lib/concurrent/atomics.rb +7 -37
- data/lib/concurrent/collection/copy_on_notify_observer_set.rb +7 -15
- data/lib/concurrent/collection/copy_on_write_observer_set.rb +7 -15
- data/lib/concurrent/collection/java_non_concurrent_priority_queue.rb +84 -0
- data/lib/concurrent/collection/map/atomic_reference_map_backend.rb +927 -0
- data/lib/concurrent/collection/map/mri_map_backend.rb +66 -0
- data/lib/concurrent/collection/map/non_concurrent_map_backend.rb +144 -0
- data/lib/concurrent/collection/map/synchronized_map_backend.rb +86 -0
- data/lib/concurrent/collection/non_concurrent_priority_queue.rb +143 -0
- data/lib/concurrent/collection/ruby_non_concurrent_priority_queue.rb +150 -0
- data/lib/concurrent/concern/dereferenceable.rb +9 -24
- data/lib/concurrent/concern/logging.rb +1 -1
- data/lib/concurrent/concern/obligation.rb +11 -20
- data/lib/concurrent/concern/observable.rb +38 -13
- data/lib/concurrent/configuration.rb +23 -152
- data/lib/concurrent/constants.rb +8 -0
- data/lib/concurrent/delay.rb +14 -12
- data/lib/concurrent/exchanger.rb +339 -41
- data/lib/concurrent/executor/abstract_executor_service.rb +134 -0
- data/lib/concurrent/executor/executor_service.rb +23 -359
- data/lib/concurrent/executor/immediate_executor.rb +3 -2
- data/lib/concurrent/executor/java_executor_service.rb +100 -0
- data/lib/concurrent/executor/java_single_thread_executor.rb +3 -3
- data/lib/concurrent/executor/java_thread_pool_executor.rb +3 -4
- data/lib/concurrent/executor/ruby_executor_service.rb +78 -0
- data/lib/concurrent/executor/ruby_single_thread_executor.rb +10 -66
- data/lib/concurrent/executor/ruby_thread_pool_executor.rb +25 -22
- data/lib/concurrent/executor/safe_task_executor.rb +6 -7
- data/lib/concurrent/executor/serial_executor_service.rb +34 -0
- data/lib/concurrent/executor/serialized_execution.rb +10 -33
- data/lib/concurrent/executor/serialized_execution_delegator.rb +28 -0
- data/lib/concurrent/executor/simple_executor_service.rb +1 -10
- data/lib/concurrent/executor/single_thread_executor.rb +20 -10
- data/lib/concurrent/executor/timer_set.rb +8 -10
- data/lib/concurrent/executors.rb +12 -2
- data/lib/concurrent/future.rb +6 -4
- data/lib/concurrent/hash.rb +35 -0
- data/lib/concurrent/immutable_struct.rb +5 -1
- data/lib/concurrent/ivar.rb +12 -16
- data/lib/concurrent/lazy_register.rb +11 -8
- data/lib/concurrent/map.rb +180 -0
- data/lib/concurrent/maybe.rb +6 -3
- data/lib/concurrent/mutable_struct.rb +7 -6
- data/lib/concurrent/mvar.rb +26 -2
- data/lib/concurrent/{executor/executor.rb → options.rb} +5 -29
- data/lib/concurrent/promise.rb +7 -5
- data/lib/concurrent/scheduled_task.rb +13 -71
- data/lib/concurrent/settable_struct.rb +5 -4
- data/lib/concurrent/synchronization.rb +15 -3
- data/lib/concurrent/synchronization/abstract_lockable_object.rb +98 -0
- data/lib/concurrent/synchronization/abstract_object.rb +7 -146
- data/lib/concurrent/synchronization/abstract_struct.rb +2 -3
- data/lib/concurrent/synchronization/condition.rb +6 -4
- data/lib/concurrent/synchronization/jruby_lockable_object.rb +13 -0
- data/lib/concurrent/synchronization/jruby_object.rb +44 -0
- data/lib/concurrent/synchronization/lock.rb +3 -2
- data/lib/concurrent/synchronization/lockable_object.rb +72 -0
- data/lib/concurrent/synchronization/mri_lockable_object.rb +71 -0
- data/lib/concurrent/synchronization/mri_object.rb +43 -0
- data/lib/concurrent/synchronization/object.rb +140 -73
- data/lib/concurrent/synchronization/rbx_lockable_object.rb +65 -0
- data/lib/concurrent/synchronization/rbx_object.rb +30 -73
- data/lib/concurrent/synchronization/volatile.rb +34 -0
- data/lib/concurrent/thread_safe/synchronized_delegator.rb +50 -0
- data/lib/concurrent/thread_safe/util.rb +14 -0
- data/lib/concurrent/thread_safe/util/adder.rb +74 -0
- data/lib/concurrent/thread_safe/util/array_hash_rbx.rb +30 -0
- data/lib/concurrent/thread_safe/util/cheap_lockable.rb +118 -0
- data/lib/concurrent/thread_safe/util/power_of_two_tuple.rb +38 -0
- data/lib/concurrent/thread_safe/util/striped64.rb +241 -0
- data/lib/concurrent/thread_safe/util/volatile.rb +75 -0
- data/lib/concurrent/thread_safe/util/xor_shift_random.rb +50 -0
- data/lib/concurrent/timer_task.rb +3 -4
- data/lib/concurrent/tuple.rb +86 -0
- data/lib/concurrent/tvar.rb +5 -1
- data/lib/concurrent/utility/at_exit.rb +1 -1
- data/lib/concurrent/utility/engine.rb +4 -0
- data/lib/concurrent/utility/monotonic_time.rb +3 -4
- data/lib/concurrent/utility/native_extension_loader.rb +50 -30
- data/lib/concurrent/version.rb +2 -2
- data/lib/concurrent_ruby_ext.jar +0 -0
- metadata +47 -12
- data/lib/concurrent/atomic/condition.rb +0 -78
- data/lib/concurrent/collection/priority_queue.rb +0 -360
- data/lib/concurrent/synchronization/java_object.rb +0 -34
- data/lib/concurrent/synchronization/monitor_object.rb +0 -27
- data/lib/concurrent/synchronization/mutex_object.rb +0 -43
- data/lib/concurrent/utilities.rb +0 -5
- data/lib/concurrent/utility/timeout.rb +0 -39
- data/lib/concurrent/utility/timer.rb +0 -26
- data/lib/concurrent_ruby.rb +0 -2
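
To pick up the 1.0.0 release, the dependency pin in a consuming project changes along these lines. This is an illustrative sketch only, not part of the diff; on JRuby, Bundler resolves the -java platform variant of the gem automatically:

    # Gemfile — hypothetical upgrade pin for this release
    gem 'concurrent-ruby', '~> 1.0.0'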
--- /dev/null
+++ data/lib/concurrent/thread_safe/util/power_of_two_tuple.rb
@@ -0,0 +1,38 @@
+require 'concurrent/thread_safe/util'
+require 'concurrent/tuple'
+
+module Concurrent
+
+  # @!visibility private
+  module ThreadSafe
+
+    # @!visibility private
+    module Util
+
+      # @!visibility private
+      class PowerOfTwoTuple < Concurrent::Tuple
+
+        def initialize(size)
+          raise ArgumentError, "size must be a power of 2 (#{size.inspect} provided)" unless size > 0 && size & (size - 1) == 0
+          super(size)
+        end
+
+        def hash_to_index(hash)
+          (size - 1) & hash
+        end
+
+        def volatile_get_by_hash(hash)
+          volatile_get(hash_to_index(hash))
+        end
+
+        def volatile_set_by_hash(hash, value)
+          volatile_set(hash_to_index(hash), value)
+        end
+
+        def next_in_size_table
+          self.class.new(size << 1)
+        end
+      end
+    end
+  end
+end
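
The constructor above guarantees a power-of-two size, so hash_to_index reduces to a single bit mask over the low bits of the hash. A small sketch of how the striping code below uses it; illustrative only, since PowerOfTwoTuple is a private internal and the values here are arbitrary:

    require 'concurrent/thread_safe/util/power_of_two_tuple'

    tuple = Concurrent::ThreadSafe::Util::PowerOfTwoTuple.new(8)
    hash  = 0x2a5f
    tuple.hash_to_index(hash)                # => 7, i.e. hash & (size - 1)
    tuple.volatile_set_by_hash(hash, :cell)  # volatile write into the masked slot
    tuple.volatile_get_by_hash(hash)         # => :cell
    tuple.next_in_size_table.size            # => 16, doubled when the table grows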
--- /dev/null
+++ data/lib/concurrent/thread_safe/util/striped64.rb
@@ -0,0 +1,241 @@
+require 'concurrent/thread_safe/util'
+require 'concurrent/thread_safe/util/power_of_two_tuple'
+require 'concurrent/thread_safe/util/volatile'
+require 'concurrent/thread_safe/util/xor_shift_random'
+
+module Concurrent
+
+  # @!visibility private
+  module ThreadSafe
+
+    # @!visibility private
+    module Util
+
+      # A Ruby port of Doug Lea's jsr166e.Striped64 class version 1.6
+      # available in the public domain.
+      #
+      # Original source code available here:
+      # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/Striped64.java?revision=1.6
+      #
+      # Class holding common representation and mechanics for classes supporting
+      # dynamic striping on 64bit values.
+      #
+      # This class maintains a lazily-initialized table of atomically updated
+      # variables, plus an extra +base+ field. The table size is a power of two.
+      # Indexing uses masked per-thread hash codes. Nearly all methods on this
+      # class are private, accessed directly by subclasses.
+      #
+      # Table entries are of class +Cell+; a variant of AtomicLong padded to
+      # reduce cache contention on most processors. Padding is overkill for most
+      # Atomics because they are usually irregularly scattered in memory and thus
+      # don't interfere much with each other. But Atomic objects residing in
+      # arrays will tend to be placed adjacent to each other, and so will most
+      # often share cache lines (with a huge negative performance impact) without
+      # this precaution.
+      #
+      # In part because +Cell+s are relatively large, we avoid creating them until
+      # they are needed. When there is no contention, all updates are made to the
+      # +base+ field. Upon first contention (a failed CAS on +base+ update), the
+      # table is initialized to size 2. The table size is doubled upon further
+      # contention until reaching the nearest power of two greater than or equal
+      # to the number of CPUS. Table slots remain empty (+nil+) until they are
+      # needed.
+      #
+      # A single spinlock (+busy+) is used for initializing and resizing the
+      # table, as well as populating slots with new +Cell+s. There is no need for
+      # a blocking lock: When the lock is not available, threads try other slots
+      # (or the base). During these retries, there is increased contention and
+      # reduced locality, which is still better than alternatives.
+      #
+      # Per-thread hash codes are initialized to random values. Contention and/or
+      # table collisions are indicated by failed CASes when performing an update
+      # operation (see method +retry_update+). Upon a collision, if the table size
+      # is less than the capacity, it is doubled in size unless some other thread
+      # holds the lock. If a hashed slot is empty, and lock is available, a new
+      # +Cell+ is created. Otherwise, if the slot exists, a CAS is tried. Retries
+      # proceed by "double hashing", using a secondary hash (XorShift) to try to
+      # find a free slot.
+      #
+      # The table size is capped because, when there are more threads than CPUs,
+      # supposing that each thread were bound to a CPU, there would exist a
+      # perfect hash function mapping threads to slots that eliminates collisions.
+      # When we reach capacity, we search for this mapping by randomly varying the
+      # hash codes of colliding threads. Because search is random, and collisions
+      # only become known via CAS failures, convergence can be slow, and because
+      # threads are typically not bound to CPUS forever, may not occur at all.
+      # However, despite these limitations, observed contention rates are
+      # typically low in these cases.
+      #
+      # It is possible for a +Cell+ to become unused when threads that once hashed
+      # to it terminate, as well as in the case where doubling the table causes no
+      # thread to hash to it under expanded mask. We do not try to detect or
+      # remove such cells, under the assumption that for long-running instances,
+      # observed contention levels will recur, so the cells will eventually be
+      # needed again; and for short-lived ones, it does not matter.
+      #
+      # @!visibility private
+      class Striped64
+
+        # Padded variant of AtomicLong supporting only raw accesses plus CAS.
+        # The +value+ field is placed between pads, hoping that the JVM doesn't
+        # reorder them.
+        #
+        # Optimisation note: It would be possible to use a release-only
+        # form of CAS here, if it were provided.
+        #
+        # @!visibility private
+        class Cell < Concurrent::AtomicReference
+
+          # TODO: this only adds padding after the :value slot, need to find a way to add padding before the slot
+          # @!visibility private
+          attr_reader *(12.times.collect{ |i| "padding_#{i}".to_sym })
+
+          alias_method :cas, :compare_and_set
+
+          def cas_computed
+            cas(current_value = value, yield(current_value))
+          end
+        end
+
+        extend Volatile
+        attr_volatile :cells, # Table of cells. When non-null, size is a power of 2.
+                      :base,  # Base value, used mainly when there is no contention, but also as a fallback during table initialization races. Updated via CAS.
+                      :busy   # Spinlock (locked via CAS) used when resizing and/or creating Cells.
+
+        alias_method :busy?, :busy
+
+        def initialize
+          super()
+          self.busy = false
+          self.base = 0
+        end
+
+        # Handles cases of updates involving initialization, resizing,
+        # creating new Cells, and/or contention. See above for
+        # explanation. This method suffers the usual non-modularity
+        # problems of optimistic retry code, relying on rechecked sets of
+        # reads.
+        #
+        # Arguments:
+        # [+x+]
+        #   the value
+        # [+hash_code+]
+        #   hash code used
+        # [+was_uncontended+]
+        #   false if CAS failed before call
+        def retry_update(x, hash_code, was_uncontended) # :yields: current_value
+          hash = hash_code
+          collided = false # True if last slot nonempty
+          while true
+            if current_cells = cells
+              if !(cell = current_cells.volatile_get_by_hash(hash))
+                if busy?
+                  collided = false
+                else # Try to attach new Cell
+                  if try_to_install_new_cell(Cell.new(x), hash) # Optimistically create and try to insert new cell
+                    break
+                  else
+                    redo # Slot is now non-empty
+                  end
+                end
+              elsif !was_uncontended # CAS already known to fail
+                was_uncontended = true # Continue after rehash
+              elsif cell.cas_computed {|current_value| yield current_value}
+                break
+              elsif current_cells.size >= CPU_COUNT || cells != current_cells # At max size or stale
+                collided = false
+              elsif collided && expand_table_unless_stale(current_cells)
+                collided = false
+                redo # Retry with expanded table
+              else
+                collided = true
+              end
+              hash = XorShiftRandom.xorshift(hash)
+
+            elsif try_initialize_cells(x, hash) || cas_base_computed {|current_base| yield current_base}
+              break
+            end
+          end
+          self.hash_code = hash
+        end
+
+        private
+        # Static per-thread hash code key. Shared across all instances to
+        # reduce Thread locals pollution and because adjustments due to
+        # collisions in one table are likely to be appropriate for
+        # others.
+        THREAD_LOCAL_KEY = "#{name}.hash_code".to_sym
+
+        # A thread-local hash code accessor. The code is initially
+        # random, but may be set to a different value upon collisions.
+        def hash_code
+          Thread.current[THREAD_LOCAL_KEY] ||= XorShiftRandom.get
+        end
+
+        def hash_code=(hash)
+          Thread.current[THREAD_LOCAL_KEY] = hash
+        end
+
+        # Sets base and all +cells+ to the given value.
+        def internal_reset(initial_value)
+          current_cells = cells
+          self.base = initial_value
+          if current_cells
+            current_cells.each do |cell|
+              cell.value = initial_value if cell
+            end
+          end
+        end
+
+        def cas_base_computed
+          cas_base(current_base = base, yield(current_base))
+        end
+
+        def free?
+          !busy?
+        end
+
+        def try_initialize_cells(x, hash)
+          if free? && !cells
+            try_in_busy do
+              unless cells # Recheck under lock
+                new_cells = PowerOfTwoTuple.new(2)
+                new_cells.volatile_set_by_hash(hash, Cell.new(x))
+                self.cells = new_cells
+              end
+            end
+          end
+        end
+
+        def expand_table_unless_stale(current_cells)
+          try_in_busy do
+            if current_cells == cells # Recheck under lock
+              new_cells = current_cells.next_in_size_table
+              current_cells.each_with_index {|x, i| new_cells.volatile_set(i, x)}
+              self.cells = new_cells
+            end
+          end
+        end
+
+        def try_to_install_new_cell(new_cell, hash)
+          try_in_busy do
+            # Recheck under lock
+            if (current_cells = cells) && !current_cells.volatile_get(i = current_cells.hash_to_index(hash))
+              current_cells.volatile_set(i, new_cell)
+            end
+          end
+        end
+
+        def try_in_busy
+          if cas_busy(false, true)
+            begin
+              yield
+            ensure
+              self.busy = false
+            end
+          end
+        end
+      end
+    end
+  end
+end
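
Striped64 is the machinery behind the new Adder (data/lib/concurrent/thread_safe/util/adder.rb in the file list above): an update first tries a single CAS on the base field and only falls back to retry_update under contention, which spreads updates across per-thread cells. Below is a hypothetical minimal counter subclass sketching that contract; it is not the library's Adder, and the SimpleAdder name and its methods are invented for illustration:

    require 'concurrent/thread_safe/util/striped64'

    # Illustrative subclass, not part of concurrent-ruby.
    class SimpleAdder < Concurrent::ThreadSafe::Util::Striped64
      def add(x)
        # Fast path: no cell table yet and an uncontended CAS on base succeeds.
        if (current_cells = cells) || !cas_base_computed { |current_base| current_base + x }
          was_uncontended = true
          hash = hash_code
          # Try this thread's own cell once; otherwise let retry_update handle
          # initialization, collisions, and table growth.
          unless current_cells &&
                 (cell = current_cells.volatile_get_by_hash(hash)) &&
                 (was_uncontended = cell.cas_computed { |current| current + x })
            retry_update(x, hash, was_uncontended) { |current| current + x }
          end
        end
      end

      def sum
        result = base
        current_cells = cells
        current_cells.each { |cell| result += cell.value if cell } if current_cells
        result
      end
    end

    adder = SimpleAdder.new
    4.times.map { Thread.new { 10_000.times { adder.add(1) } } }.each(&:join)
    adder.sum # => 40000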
--- /dev/null
+++ data/lib/concurrent/thread_safe/util/volatile.rb
@@ -0,0 +1,75 @@
+require 'concurrent/thread_safe/util'
+
+module Concurrent
+
+  # @!visibility private
+  module ThreadSafe
+
+    # @!visibility private
+    module Util
+
+      # @!visibility private
+      module Volatile
+
+        # Provides +volatile+ (in the JVM's sense) attribute accessors implemented
+        # atop of +Concurrent::AtomicReference+.
+        #
+        # Usage:
+        #   class Foo
+        #     extend Concurrent::ThreadSafe::Util::Volatile
+        #     attr_volatile :foo, :bar
+        #
+        #     def initialize(bar)
+        #       super() # must super() into parent initializers before using the volatile attribute accessors
+        #       self.bar = bar
+        #     end
+        #
+        #     def hello
+        #       my_foo = foo # volatile read
+        #       self.foo = 1 # volatile write
+        #       cas_foo(1, 2) # => true | a strong CAS
+        #     end
+        #   end
+        def attr_volatile(*attr_names)
+          return if attr_names.empty?
+          include(Module.new do
+            atomic_ref_setup = attr_names.map {|attr_name| "@__#{attr_name} = Concurrent::AtomicReference.new"}
+            initialize_copy_setup = attr_names.zip(atomic_ref_setup).map do |attr_name, ref_setup|
+              "#{ref_setup}(other.instance_variable_get(:@__#{attr_name}).get)"
+            end
+            class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1
+              def initialize(*)
+                super
+                #{atomic_ref_setup.join('; ')}
+              end
+
+              def initialize_copy(other)
+                super
+                #{initialize_copy_setup.join('; ')}
+              end
+            RUBY_EVAL
+
+            attr_names.each do |attr_name|
+              class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1
+                def #{attr_name}
+                  @__#{attr_name}.get
+                end
+
+                def #{attr_name}=(value)
+                  @__#{attr_name}.set(value)
+                end
+
+                def compare_and_set_#{attr_name}(old_value, new_value)
+                  @__#{attr_name}.compare_and_set(old_value, new_value)
+                end
+              RUBY_EVAL
+
+              alias_method :"cas_#{attr_name}", :"compare_and_set_#{attr_name}"
+              alias_method :"lazy_set_#{attr_name}", :"#{attr_name}="
+            end
+          end)
+        end
+      end
+    end
+  end
+end
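
The module's own comment shows the intended pattern; below is a hedged, self-contained variant that can be run directly. The Flag class and its :state attribute are invented for the example, and the extra require of AtomicReference is belt-and-braces in case the util file does not load it:

    require 'concurrent/atomic/atomic_reference'
    require 'concurrent/thread_safe/util/volatile'

    class Flag
      extend Concurrent::ThreadSafe::Util::Volatile
      attr_volatile :state

      def initialize
        super()            # runs the generated initializer that allocates the AtomicReference
        self.state = :off
      end
    end

    flag = Flag.new
    flag.state                  # => :off  (volatile read)
    flag.cas_state(:off, :on)   # => true  (strong compare-and-set)
    flag.state = :blinking      # volatile write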
--- /dev/null
+++ data/lib/concurrent/thread_safe/util/xor_shift_random.rb
@@ -0,0 +1,50 @@
+require 'concurrent/thread_safe/util'
+
+module Concurrent
+
+  # @!visibility private
+  module ThreadSafe
+
+    # @!visibility private
+    module Util
+
+      # An xorshift random number (positive +Fixnum+s) generator; provides a
+      # reasonably cheap way to generate thread-local random numbers without
+      # contending for the global +Kernel.rand+.
+      #
+      # Usage:
+      #   x = XorShiftRandom.get # uses Kernel.rand to generate an initial seed
+      #   while true
+      #     if (x = XorShiftRandom.xorshift(x)).odd? # thread-locally generate the next random number
+      #       do_something_at_random
+      #     end
+      #   end
+      module XorShiftRandom
+        extend self
+        MAX_XOR_SHIFTABLE_INT = MAX_INT - 1
+
+        # Generates an initial non-zero positive +Fixnum+ via +Kernel.rand+.
+        def get
+          Kernel.rand(MAX_XOR_SHIFTABLE_INT) + 1 # 0 can't be xorshifted
+        end
+
+        # xorshift based on: http://www.jstatsoft.org/v08/i14/paper
+        if 0.size == 4
+          # using the "y^=y>>a; y^=y<<b; y^=y>>c;" transform with the (a,b,c) tuple with values (3,1,14) to minimise Bignum overflows
+          def xorshift(x)
+            x ^= x >> 3
+            x ^= (x << 1) & MAX_INT # cut-off Bignum overflow
+            x ^= x >> 14
+          end
+        else
+          # using the "y^=y>>a; y^=y<<b; y^=y>>c;" transform with the (a,b,c) tuple with values (1,1,54) to minimise Bignum overflows
+          def xorshift(x)
+            x ^= x >> 1
+            x ^= (x << 1) & MAX_INT # cut-off Bignum overflow
+            x ^= x >> 54
+          end
+        end
+      end
+    end
+  end
+end
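
In the striping code above, each thread seeds once via get and then advances its own sequence with xorshift, avoiding any shared state around Kernel.rand. A minimal standalone sketch (the constant alias and loop count are arbitrary):

    require 'concurrent/thread_safe/util/xor_shift_random'

    XorShift = Concurrent::ThreadSafe::Util::XorShiftRandom

    x = XorShift.get            # one-time seed for this thread, via Kernel.rand
    5.times do
      x = XorShift.xorshift(x)  # advance the thread-local sequence; no global state touched
      puts x
    end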
--- data/lib/concurrent/timer_task.rb
+++ data/lib/concurrent/timer_task.rb
@@ -268,10 +268,9 @@ module Concurrent
 
     private :post, :<<
 
-    protected
+    private
 
     def ns_initialize(opts, &task)
-      init_mutex(self)
       set_deref_options(opts)
 
       self.execution_interval = opts[:execution] || opts[:execution_interval] || EXECUTION_INTERVAL
@@ -284,13 +283,13 @@ module Concurrent
     end
 
     # @!visibility private
-    def shutdown_execution
+    def ns_shutdown_execution
       @running.make_false
       super
     end
 
     # @!visibility private
-    def kill_execution
+    def ns_kill_execution
       @running.make_false
       super
    end
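
These hunks rename TimerTask's executor lifecycle hooks to the 1.0 ns_-prefixed convention and drop the old init_mutex call; the public interface is untouched. A minimal usage sketch against the public 1.0.0 API (the interval and message are arbitrary):

    require 'concurrent'

    task = Concurrent::TimerTask.new(execution_interval: 2) do
      puts "heartbeat at #{Time.now}"
    end
    task.execute     # runs the block roughly every two seconds

    sleep 7
    task.shutdown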
--- /dev/null
+++ data/lib/concurrent/tuple.rb
@@ -0,0 +1,86 @@
+require 'concurrent/atomic/atomic_reference'
+
+module Concurrent
+
+  # A fixed size array with volatile (synchronized, thread safe) getters/setters.
+  # Mixes in Ruby's `Enumerable` module for enhanced search, sort, and traversal.
+  #
+  # @example
+  #   tuple = Concurrent::Tuple.new(16)
+  #
+  #   tuple.set(0, :foo)                   #=> :foo  | volatile write
+  #   tuple.get(0)                         #=> :foo  | volatile read
+  #   tuple.compare_and_set(0, :foo, :bar) #=> true  | strong CAS
+  #   tuple.cas(0, :foo, :baz)             #=> false | strong CAS
+  #   tuple.get(0)                         #=> :bar  | volatile read
+  #
+  # @see https://en.wikipedia.org/wiki/Tuple Tuple entry at Wikipedia
+  # @see http://www.erlang.org/doc/reference_manual/data_types.html#id70396 Erlang Tuple
+  # @see http://ruby-doc.org/core-2.2.2/Enumerable.html Enumerable
+  class Tuple
+    include Enumerable
+
+    # The (fixed) size of the tuple.
+    attr_reader :size
+
+    # @!visibility private
+    Tuple = defined?(Rubinius::Tuple) ? Rubinius::Tuple : Array
+    private_constant :Tuple
+
+    # Create a new tuple of the given size.
+    #
+    # @param [Integer] size the number of elements in the tuple
+    def initialize(size)
+      @size = size
+      @tuple = tuple = Tuple.new(size)
+      i = 0
+      while i < size
+        tuple[i] = Concurrent::AtomicReference.new
+        i += 1
+      end
+    end
+
+    # Get the value of the element at the given index.
+    #
+    # @param [Integer] i the index from which to retrieve the value
+    # @return [Object] the value at the given index or nil if the index is out of bounds
+    def get(i)
+      return nil if i >= @size || i < 0
+      @tuple[i].get
+    end
+    alias_method :volatile_get, :get
+
+    # Set the element at the given index to the given value.
+    #
+    # @param [Integer] i the index for the element to set
+    # @param [Object] value the value to set at the given index
+    #
+    # @return [Object] the new value of the element at the given index or nil if the index is out of bounds
+    def set(i, value)
+      return nil if i >= @size || i < 0
+      @tuple[i].set(value)
+    end
+    alias_method :volatile_set, :set
+
+    # Set the value at the given index to the new value if and only if the current
+    # value matches the given old value.
+    #
+    # @param [Integer] i the index for the element to set
+    # @param [Object] old_value the value to compare against the current value
+    # @param [Object] new_value the value to set at the given index
+    #
+    # @return [Boolean] true if the value at the given element was set else false
+    def compare_and_set(i, old_value, new_value)
+      return false if i >= @size || i < 0
+      @tuple[i].compare_and_set(old_value, new_value)
+    end
+    alias_method :cas, :compare_and_set
+
+    # Calls the given block once for each element in self, passing that element's value as a parameter.
+    #
+    # @yieldparam [Object] value the value of the element at the current index
+    def each
+      @tuple.each {|ref| yield ref.get}
+    end
+  end
+end
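
Because Tuple mixes in Enumerable and each yields the stored values, the usual Enumerable methods operate over a sequence of volatile reads. A short sketch building on the class's own example (the size and stored values are arbitrary):

    require 'concurrent/tuple'

    tuple = Concurrent::Tuple.new(4)
    4.times { |i| tuple.set(i, i * i) }

    tuple.to_a            # => [0, 1, 4, 9]  (each element read volatilely)
    tuple.select(&:odd?)  # => [1, 9]
    tuple.get(99)         # => nil; out-of-bounds reads return nil rather than raising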