concurrent-ruby 1.1.5
- checksums.yaml +7 -0
- data/CHANGELOG.md +478 -0
- data/Gemfile +41 -0
- data/LICENSE.md +23 -0
- data/README.md +381 -0
- data/Rakefile +327 -0
- data/ext/concurrent-ruby/ConcurrentRubyService.java +17 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java +175 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java +248 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java +93 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java +113 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java +159 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java +307 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java +31 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java +3863 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java +203 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java +342 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java +3800 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java +204 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java +291 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java +199 -0
- data/lib/concurrent-ruby.rb +1 -0
- data/lib/concurrent.rb +134 -0
- data/lib/concurrent/agent.rb +587 -0
- data/lib/concurrent/array.rb +66 -0
- data/lib/concurrent/async.rb +459 -0
- data/lib/concurrent/atom.rb +222 -0
- data/lib/concurrent/atomic/abstract_thread_local_var.rb +66 -0
- data/lib/concurrent/atomic/atomic_boolean.rb +126 -0
- data/lib/concurrent/atomic/atomic_fixnum.rb +143 -0
- data/lib/concurrent/atomic/atomic_markable_reference.rb +164 -0
- data/lib/concurrent/atomic/atomic_reference.rb +204 -0
- data/lib/concurrent/atomic/count_down_latch.rb +100 -0
- data/lib/concurrent/atomic/cyclic_barrier.rb +128 -0
- data/lib/concurrent/atomic/event.rb +109 -0
- data/lib/concurrent/atomic/java_count_down_latch.rb +42 -0
- data/lib/concurrent/atomic/java_thread_local_var.rb +37 -0
- data/lib/concurrent/atomic/mutex_atomic_boolean.rb +62 -0
- data/lib/concurrent/atomic/mutex_atomic_fixnum.rb +75 -0
- data/lib/concurrent/atomic/mutex_count_down_latch.rb +44 -0
- data/lib/concurrent/atomic/mutex_semaphore.rb +115 -0
- data/lib/concurrent/atomic/read_write_lock.rb +254 -0
- data/lib/concurrent/atomic/reentrant_read_write_lock.rb +379 -0
- data/lib/concurrent/atomic/ruby_thread_local_var.rb +161 -0
- data/lib/concurrent/atomic/semaphore.rb +145 -0
- data/lib/concurrent/atomic/thread_local_var.rb +104 -0
- data/lib/concurrent/atomic_reference/mutex_atomic.rb +56 -0
- data/lib/concurrent/atomic_reference/numeric_cas_wrapper.rb +28 -0
- data/lib/concurrent/atomics.rb +10 -0
- data/lib/concurrent/collection/copy_on_notify_observer_set.rb +107 -0
- data/lib/concurrent/collection/copy_on_write_observer_set.rb +111 -0
- data/lib/concurrent/collection/java_non_concurrent_priority_queue.rb +84 -0
- data/lib/concurrent/collection/lock_free_stack.rb +158 -0
- data/lib/concurrent/collection/map/atomic_reference_map_backend.rb +927 -0
- data/lib/concurrent/collection/map/mri_map_backend.rb +66 -0
- data/lib/concurrent/collection/map/non_concurrent_map_backend.rb +140 -0
- data/lib/concurrent/collection/map/synchronized_map_backend.rb +82 -0
- data/lib/concurrent/collection/non_concurrent_priority_queue.rb +143 -0
- data/lib/concurrent/collection/ruby_non_concurrent_priority_queue.rb +150 -0
- data/lib/concurrent/concern/deprecation.rb +34 -0
- data/lib/concurrent/concern/dereferenceable.rb +73 -0
- data/lib/concurrent/concern/logging.rb +32 -0
- data/lib/concurrent/concern/obligation.rb +220 -0
- data/lib/concurrent/concern/observable.rb +110 -0
- data/lib/concurrent/concurrent_ruby.jar +0 -0
- data/lib/concurrent/configuration.rb +184 -0
- data/lib/concurrent/constants.rb +8 -0
- data/lib/concurrent/dataflow.rb +81 -0
- data/lib/concurrent/delay.rb +199 -0
- data/lib/concurrent/errors.rb +69 -0
- data/lib/concurrent/exchanger.rb +352 -0
- data/lib/concurrent/executor/abstract_executor_service.rb +134 -0
- data/lib/concurrent/executor/cached_thread_pool.rb +62 -0
- data/lib/concurrent/executor/executor_service.rb +185 -0
- data/lib/concurrent/executor/fixed_thread_pool.rb +206 -0
- data/lib/concurrent/executor/immediate_executor.rb +66 -0
- data/lib/concurrent/executor/indirect_immediate_executor.rb +44 -0
- data/lib/concurrent/executor/java_executor_service.rb +91 -0
- data/lib/concurrent/executor/java_single_thread_executor.rb +29 -0
- data/lib/concurrent/executor/java_thread_pool_executor.rb +123 -0
- data/lib/concurrent/executor/ruby_executor_service.rb +78 -0
- data/lib/concurrent/executor/ruby_single_thread_executor.rb +22 -0
- data/lib/concurrent/executor/ruby_thread_pool_executor.rb +362 -0
- data/lib/concurrent/executor/safe_task_executor.rb +35 -0
- data/lib/concurrent/executor/serial_executor_service.rb +34 -0
- data/lib/concurrent/executor/serialized_execution.rb +107 -0
- data/lib/concurrent/executor/serialized_execution_delegator.rb +28 -0
- data/lib/concurrent/executor/simple_executor_service.rb +100 -0
- data/lib/concurrent/executor/single_thread_executor.rb +56 -0
- data/lib/concurrent/executor/thread_pool_executor.rb +87 -0
- data/lib/concurrent/executor/timer_set.rb +173 -0
- data/lib/concurrent/executors.rb +20 -0
- data/lib/concurrent/future.rb +141 -0
- data/lib/concurrent/hash.rb +59 -0
- data/lib/concurrent/immutable_struct.rb +93 -0
- data/lib/concurrent/ivar.rb +207 -0
- data/lib/concurrent/map.rb +337 -0
- data/lib/concurrent/maybe.rb +229 -0
- data/lib/concurrent/mutable_struct.rb +229 -0
- data/lib/concurrent/mvar.rb +242 -0
- data/lib/concurrent/options.rb +42 -0
- data/lib/concurrent/promise.rb +579 -0
- data/lib/concurrent/promises.rb +2167 -0
- data/lib/concurrent/re_include.rb +58 -0
- data/lib/concurrent/scheduled_task.rb +318 -0
- data/lib/concurrent/set.rb +66 -0
- data/lib/concurrent/settable_struct.rb +129 -0
- data/lib/concurrent/synchronization.rb +30 -0
- data/lib/concurrent/synchronization/abstract_lockable_object.rb +98 -0
- data/lib/concurrent/synchronization/abstract_object.rb +24 -0
- data/lib/concurrent/synchronization/abstract_struct.rb +160 -0
- data/lib/concurrent/synchronization/condition.rb +60 -0
- data/lib/concurrent/synchronization/jruby_lockable_object.rb +13 -0
- data/lib/concurrent/synchronization/jruby_object.rb +45 -0
- data/lib/concurrent/synchronization/lock.rb +36 -0
- data/lib/concurrent/synchronization/lockable_object.rb +74 -0
- data/lib/concurrent/synchronization/mri_object.rb +44 -0
- data/lib/concurrent/synchronization/mutex_lockable_object.rb +76 -0
- data/lib/concurrent/synchronization/object.rb +183 -0
- data/lib/concurrent/synchronization/rbx_lockable_object.rb +65 -0
- data/lib/concurrent/synchronization/rbx_object.rb +49 -0
- data/lib/concurrent/synchronization/truffleruby_object.rb +47 -0
- data/lib/concurrent/synchronization/volatile.rb +36 -0
- data/lib/concurrent/thread_safe/synchronized_delegator.rb +50 -0
- data/lib/concurrent/thread_safe/util.rb +16 -0
- data/lib/concurrent/thread_safe/util/adder.rb +74 -0
- data/lib/concurrent/thread_safe/util/cheap_lockable.rb +118 -0
- data/lib/concurrent/thread_safe/util/data_structures.rb +63 -0
- data/lib/concurrent/thread_safe/util/power_of_two_tuple.rb +38 -0
- data/lib/concurrent/thread_safe/util/striped64.rb +246 -0
- data/lib/concurrent/thread_safe/util/volatile.rb +75 -0
- data/lib/concurrent/thread_safe/util/xor_shift_random.rb +50 -0
- data/lib/concurrent/timer_task.rb +334 -0
- data/lib/concurrent/tuple.rb +86 -0
- data/lib/concurrent/tvar.rb +258 -0
- data/lib/concurrent/utility/at_exit.rb +97 -0
- data/lib/concurrent/utility/engine.rb +56 -0
- data/lib/concurrent/utility/monotonic_time.rb +58 -0
- data/lib/concurrent/utility/native_extension_loader.rb +79 -0
- data/lib/concurrent/utility/native_integer.rb +53 -0
- data/lib/concurrent/utility/processor_counter.rb +158 -0
- data/lib/concurrent/version.rb +3 -0
- metadata +193 -0
data/lib/concurrent/thread_safe/util/striped64.rb (new file)
@@ -0,0 +1,246 @@

require 'concurrent/thread_safe/util'
require 'concurrent/thread_safe/util/power_of_two_tuple'
require 'concurrent/thread_safe/util/volatile'
require 'concurrent/thread_safe/util/xor_shift_random'

module Concurrent

  # @!visibility private
  module ThreadSafe

    # @!visibility private
    module Util

      # A Ruby port of Doug Lea's jsr166e.Striped64 class version 1.6
      # available in public domain.
      #
      # Original source code available here:
      # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/Striped64.java?revision=1.6
      #
      # Class holding common representation and mechanics for classes supporting
      # dynamic striping on 64bit values.
      #
      # This class maintains a lazily-initialized table of atomically updated
      # variables, plus an extra +base+ field. The table size is a power of two.
      # Indexing uses masked per-thread hash codes. Nearly all methods on this
      # class are private, accessed directly by subclasses.
      #
      # Table entries are of class +Cell+; a variant of AtomicLong padded to
      # reduce cache contention on most processors. Padding is overkill for most
      # Atomics because they are usually irregularly scattered in memory and thus
      # don't interfere much with each other. But Atomic objects residing in
      # arrays will tend to be placed adjacent to each other, and so will most
      # often share cache lines (with a huge negative performance impact) without
      # this precaution.
      #
      # In part because +Cell+s are relatively large, we avoid creating them until
      # they are needed. When there is no contention, all updates are made to the
      # +base+ field. Upon first contention (a failed CAS on +base+ update), the
      # table is initialized to size 2. The table size is doubled upon further
      # contention until reaching the nearest power of two greater than or equal
      # to the number of CPUS. Table slots remain empty (+nil+) until they are
      # needed.
      #
      # A single spinlock (+busy+) is used for initializing and resizing the
      # table, as well as populating slots with new +Cell+s. There is no need for
      # a blocking lock: When the lock is not available, threads try other slots
      # (or the base). During these retries, there is increased contention and
      # reduced locality, which is still better than alternatives.
      #
      # Per-thread hash codes are initialized to random values. Contention and/or
      # table collisions are indicated by failed CASes when performing an update
      # operation (see method +retry_update+). Upon a collision, if the table size
      # is less than the capacity, it is doubled in size unless some other thread
      # holds the lock. If a hashed slot is empty, and lock is available, a new
      # +Cell+ is created. Otherwise, if the slot exists, a CAS is tried. Retries
      # proceed by "double hashing", using a secondary hash (XorShift) to try to
      # find a free slot.
      #
      # The table size is capped because, when there are more threads than CPUs,
      # supposing that each thread were bound to a CPU, there would exist a
      # perfect hash function mapping threads to slots that eliminates collisions.
      # When we reach capacity, we search for this mapping by randomly varying the
      # hash codes of colliding threads. Because search is random, and collisions
      # only become known via CAS failures, convergence can be slow, and because
      # threads are typically not bound to CPUS forever, may not occur at all.
      # However, despite these limitations, observed contention rates are
      # typically low in these cases.
      #
      # It is possible for a +Cell+ to become unused when threads that once hashed
      # to it terminate, as well as in the case where doubling the table causes no
      # thread to hash to it under expanded mask. We do not try to detect or
      # remove such cells, under the assumption that for long-running instances,
      # observed contention levels will recur, so the cells will eventually be
      # needed again; and for short-lived ones, it does not matter.
      #
      # @!visibility private
      class Striped64

        # Padded variant of AtomicLong supporting only raw accesses plus CAS.
        # The +value+ field is placed between pads, hoping that the JVM doesn't
        # reorder them.
        #
        # Optimisation note: It would be possible to use a release-only
        # form of CAS here, if it were provided.
        #
        # @!visibility private
        class Cell < Concurrent::AtomicReference

          alias_method :cas, :compare_and_set

          def cas_computed
            cas(current_value = value, yield(current_value))
          end

          # @!visibility private
          def self.padding
            # TODO: this only adds padding after the :value slot, need to find a way to add padding before the slot
            # TODO (pitr-ch 28-Jul-2018): the padding instance vars may not be created
            # hide from yardoc in a method
            attr_reader *(12.times.collect { |i| "padding_#{i}".to_sym })
          end
          padding
        end

        extend Volatile
        attr_volatile :cells, # Table of cells. When non-null, size is a power of 2.
                      :base,  # Base value, used mainly when there is no contention, but also as a fallback during table initialization races. Updated via CAS.
                      :busy   # Spinlock (locked via CAS) used when resizing and/or creating Cells.

        alias_method :busy?, :busy

        def initialize
          super()
          self.busy = false
          self.base = 0
        end

        # Handles cases of updates involving initialization, resizing,
        # creating new Cells, and/or contention. See above for
        # explanation. This method suffers the usual non-modularity
        # problems of optimistic retry code, relying on rechecked sets of
        # reads.
        #
        # Arguments:
        # [+x+]
        #   the value
        # [+hash_code+]
        #   hash code used
        # [+was_uncontended+]
        #   false if a CAS failed before the call
        def retry_update(x, hash_code, was_uncontended) # :yields: current_value
          hash     = hash_code
          collided = false # True if last slot nonempty
          while true
            if current_cells = cells
              if !(cell = current_cells.volatile_get_by_hash(hash))
                if busy?
                  collided = false
                else # Try to attach new Cell
                  if try_to_install_new_cell(Cell.new(x), hash) # Optimistically create and try to insert new cell
                    break
                  else
                    redo # Slot is now non-empty
                  end
                end
              elsif !was_uncontended # CAS already known to fail
                was_uncontended = true # Continue after rehash
              elsif cell.cas_computed { |current_value| yield current_value }
                break
              elsif current_cells.size >= CPU_COUNT || cells != current_cells # At max size or stale
                collided = false
              elsif collided && expand_table_unless_stale(current_cells)
                collided = false
                redo # Retry with expanded table
              else
                collided = true
              end
              hash = XorShiftRandom.xorshift(hash)

            elsif try_initialize_cells(x, hash) || cas_base_computed { |current_base| yield current_base }
              break
            end
          end
          self.hash_code = hash
        end

        private
        # Static per-thread hash code key. Shared across all instances to
        # reduce Thread locals pollution and because adjustments due to
        # collisions in one table are likely to be appropriate for
        # others.
        THREAD_LOCAL_KEY = "#{name}.hash_code".to_sym

        # A thread-local hash code accessor. The code is initially
        # random, but may be set to a different value upon collisions.
        def hash_code
          Thread.current[THREAD_LOCAL_KEY] ||= XorShiftRandom.get
        end

        def hash_code=(hash)
          Thread.current[THREAD_LOCAL_KEY] = hash
        end

        # Sets base and all +cells+ to the given value.
        def internal_reset(initial_value)
          current_cells = cells
          self.base     = initial_value
          if current_cells
            current_cells.each do |cell|
              cell.value = initial_value if cell
            end
          end
        end

        def cas_base_computed
          cas_base(current_base = base, yield(current_base))
        end

        def free?
          !busy?
        end

        def try_initialize_cells(x, hash)
          if free? && !cells
            try_in_busy do
              unless cells # Recheck under lock
                new_cells = PowerOfTwoTuple.new(2)
                new_cells.volatile_set_by_hash(hash, Cell.new(x))
                self.cells = new_cells
              end
            end
          end
        end

        def expand_table_unless_stale(current_cells)
          try_in_busy do
            if current_cells == cells # Recheck under lock
              new_cells = current_cells.next_in_size_table
              current_cells.each_with_index { |x, i| new_cells.volatile_set(i, x) }
              self.cells = new_cells
            end
          end
        end

        def try_to_install_new_cell(new_cell, hash)
          try_in_busy do
            # Recheck under lock
            if (current_cells = cells) && !current_cells.volatile_get(i = current_cells.hash_to_index(hash))
              current_cells.volatile_set(i, new_cell)
            end
          end
        end

        def try_in_busy
          if cas_busy(false, true)
            begin
              yield
            ensure
              self.busy = false
            end
          end
        end
      end
    end
  end
end
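The Striped64 protocol above is consumed by subclasses such as the gem's Adder (data/lib/concurrent/thread_safe/util/adder.rb in the file list). Below is a minimal, illustrative sketch of that usage pattern; the NaiveStripedCounter class and its methods are hypothetical, deliberately simpler than the real Adder, and not part of the gem.

require 'concurrent'                              # provides Concurrent::AtomicReference etc.
require 'concurrent/thread_safe/util/striped64'   # the internal file shown above

# Hypothetical example only: a naive striped counter built on Striped64.
class NaiveStripedCounter < Concurrent::ThreadSafe::Util::Striped64
  def increment
    # Fast path: while there is no cell table, a single CAS on +base+ suffices.
    return if !cells && cas_base_computed { |current_base| current_base + 1 }
    # Slow path: let retry_update handle table initialization, cell
    # installation, resizing, and per-thread rehashing under contention.
    retry_update(1, hash_code, false) { |current_value| current_value + 1 }
  end

  def sum
    # Approximate total: +base+ plus every populated cell, as in Adder#sum.
    result = base
    cells&.each { |cell| result += cell.value if cell }
    result
  end
end

The fast path is a single CAS on +base+; only when that CAS fails, or a cell table already exists, does the update fall through to retry_update's striping logic described in the class comment.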
data/lib/concurrent/thread_safe/util/volatile.rb (new file)
@@ -0,0 +1,75 @@

require 'concurrent/thread_safe/util'

module Concurrent

  # @!visibility private
  module ThreadSafe

    # @!visibility private
    module Util

      # @!visibility private
      module Volatile

        # Provides +volatile+ (in the JVM's sense) attribute accessors implemented
        # atop of +Concurrent::AtomicReference+.
        #
        # Usage:
        #   class Foo
        #     extend Concurrent::ThreadSafe::Util::Volatile
        #     attr_volatile :foo, :bar
        #
        #     def initialize(bar)
        #       super() # must super() into parent initializers before using the volatile attribute accessors
        #       self.bar = bar
        #     end
        #
        #     def hello
        #       my_foo = foo # volatile read
        #       self.foo = 1 # volatile write
        #       cas_foo(1, 2) # => true | a strong CAS
        #     end
        #   end
        def attr_volatile(*attr_names)
          return if attr_names.empty?
          include(Module.new do
            atomic_ref_setup = attr_names.map { |attr_name| "@__#{attr_name} = Concurrent::AtomicReference.new" }
            initialize_copy_setup = attr_names.zip(atomic_ref_setup).map do |attr_name, ref_setup|
              "#{ref_setup}(other.instance_variable_get(:@__#{attr_name}).get)"
            end
            class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1
              def initialize(*)
                super
                #{atomic_ref_setup.join('; ')}
              end

              def initialize_copy(other)
                super
                #{initialize_copy_setup.join('; ')}
              end
            RUBY_EVAL

            attr_names.each do |attr_name|
              class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1
                def #{attr_name}
                  @__#{attr_name}.get
                end

                def #{attr_name}=(value)
                  @__#{attr_name}.set(value)
                end

                def compare_and_set_#{attr_name}(old_value, new_value)
                  @__#{attr_name}.compare_and_set(old_value, new_value)
                end
              RUBY_EVAL

              alias_method :"cas_#{attr_name}", :"compare_and_set_#{attr_name}"
              alias_method :"lazy_set_#{attr_name}", :"#{attr_name}="
            end
          end)
        end
      end
    end
  end
end
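For each name passed to attr_volatile, the module above generates a reader, a writer, compare_and_set_<name>, and the aliases cas_<name> and lazy_set_<name>, each backed by a Concurrent::AtomicReference. A small sketch of what that yields for a single attribute follows; it is illustrative only (Volatile is a @!visibility private internal API, and the Flag class here is hypothetical).

require 'concurrent'                              # Concurrent::AtomicReference
require 'concurrent/thread_safe/util/volatile'

# Hypothetical example class, not part of the gem.
class Flag
  extend Concurrent::ThreadSafe::Util::Volatile
  attr_volatile :state

  def initialize
    super()              # let the generated initializer create the backing AtomicReference
    self.state = :idle   # volatile write via the generated writer
  end
end

flag = Flag.new
flag.state                                    #=> :idle  (volatile read)
flag.compare_and_set_state(:idle, :running)   #=> true   (strong CAS)
flag.cas_state(:running, :done)               #=> true   (alias of compare_and_set_state)
flag.lazy_set_state(:idle)                    # alias of the plain writer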
data/lib/concurrent/thread_safe/util/xor_shift_random.rb (new file)
@@ -0,0 +1,50 @@

require 'concurrent/thread_safe/util'

module Concurrent

  # @!visibility private
  module ThreadSafe

    # @!visibility private
    module Util

      # An xorshift random number (positive +Fixnum+s) generator; provides a
      # reasonably cheap way to generate thread-local random numbers without
      # contending for the global +Kernel.rand+.
      #
      # Usage:
      #   x = XorShiftRandom.get # uses Kernel.rand to generate an initial seed
      #   while true
      #     if (x = XorShiftRandom.xorshift(x)).odd? # thread-locally generate the next random number
      #       do_something_at_random
      #     end
      #   end
      module XorShiftRandom
        extend self
        MAX_XOR_SHIFTABLE_INT = MAX_INT - 1

        # Generates an initial non-zero positive +Fixnum+ via +Kernel.rand+.
        def get
          Kernel.rand(MAX_XOR_SHIFTABLE_INT) + 1 # 0 can't be xorshifted
        end

        # xorshift based on: http://www.jstatsoft.org/v08/i14/paper
        if 0.size == 4
          # using the "y ^= y >> a; y ^= y << b; y ^= y >> c;" transform with the (a,b,c) tuple with values (3,1,14) to minimise Bignum overflows
          def xorshift(x)
            x ^= x >> 3
            x ^= (x << 1) & MAX_INT # cut-off Bignum overflow
            x ^= x >> 14
          end
        else
          # using the "y ^= y >> a; y ^= y << b; y ^= y >> c;" transform with the (a,b,c) tuple with values (1,1,54) to minimise Bignum overflows
          def xorshift(x)
            x ^= x >> 1
            x ^= (x << 1) & MAX_INT # cut-off Bignum overflow
            x ^= x >> 54
          end
        end
      end
    end
  end
end
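A quick illustration of the two entry points shown above: get returns a non-zero seed and xorshift produces cheap successive values. This assumes concurrent-ruby is installed; the module is likewise an internal, @!visibility private API, and the printed values are of course non-deterministic.

require 'concurrent'
require 'concurrent/thread_safe/util/xor_shift_random'

rng = Concurrent::ThreadSafe::Util::XorShiftRandom
x = rng.get            # non-zero seed drawn from Kernel.rand
3.times do
  x = rng.xorshift(x)  # cheap, thread-local pseudo-random successor
  puts x
end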
data/lib/concurrent/timer_task.rb (new file)
@@ -0,0 +1,334 @@

require 'concurrent/collection/copy_on_notify_observer_set'
require 'concurrent/concern/dereferenceable'
require 'concurrent/concern/observable'
require 'concurrent/atomic/atomic_boolean'
require 'concurrent/executor/executor_service'
require 'concurrent/executor/ruby_executor_service'
require 'concurrent/executor/safe_task_executor'
require 'concurrent/scheduled_task'

module Concurrent

  # A very common concurrency pattern is to run a thread that performs a task at
  # regular intervals. The thread that performs the task sleeps for the given
  # interval then wakes up and performs the task. Lather, rinse, repeat... This
  # pattern causes two problems. First, it is difficult to test the business
  # logic of the task because the task itself is tightly coupled with the
  # concurrency logic. Second, an exception raised while performing the task can
  # cause the entire thread to abend. In a long-running application where the
  # task thread is intended to run for days/weeks/years, a crashed task thread
  # can pose a significant problem. `TimerTask` alleviates both problems.
  #
  # When a `TimerTask` is launched it starts a thread for monitoring the
  # execution interval. The `TimerTask` thread does not perform the task,
  # however. Instead, the TimerTask launches the task on a separate thread.
  # Should the task experience an unrecoverable crash only the task thread will
  # crash. This makes the `TimerTask` very fault tolerant. Additionally, the
  # `TimerTask` thread can respond to the success or failure of the task,
  # performing logging or ancillary operations. `TimerTask` can also be
  # configured with a timeout value allowing it to kill a task that runs too
  # long.
  #
  # One other advantage of `TimerTask` is that it forces the business logic to
  # be completely decoupled from the concurrency logic. The business logic can
  # be tested separately then passed to the `TimerTask` for scheduling and
  # running.
  #
  # In some cases it may be necessary for a `TimerTask` to affect its own
  # execution cycle. To facilitate this, a reference to the TimerTask instance
  # is passed as an argument to the provided block every time the task is
  # executed.
  #
  # The `TimerTask` class includes the `Dereferenceable` mixin module so the
  # result of the last execution is always available via the `#value` method.
  # Dereferencing options can be passed to the `TimerTask` during construction or
  # at any later time using the `#set_deref_options` method.
  #
  # `TimerTask` supports notification through the Ruby standard library
  # {http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html
  # Observable} module. On execution the `TimerTask` will notify the observers
  # with three arguments: time of execution, the result of the block (or nil on
  # failure), and any raised exceptions (or nil on success). If the timeout
  # interval is exceeded the observer will receive a `Concurrent::TimeoutError`
  # object as the third argument.
  #
  # @!macro copy_options
  #
  # @example Basic usage
  #   task = Concurrent::TimerTask.new{ puts 'Boom!' }
  #   task.execute
  #
  #   task.execution_interval #=> 60 (default)
  #   task.timeout_interval   #=> 30 (default)
  #
  #   # wait 60 seconds...
  #   #=> 'Boom!'
  #
  #   task.shutdown #=> true
  #
  # @example Configuring `:execution_interval` and `:timeout_interval`
  #   task = Concurrent::TimerTask.new(execution_interval: 5, timeout_interval: 5) do
  #     puts 'Boom!'
  #   end
  #
  #   task.execution_interval #=> 5
  #   task.timeout_interval   #=> 5
  #
  # @example Immediate execution with `:run_now`
  #   task = Concurrent::TimerTask.new(run_now: true){ puts 'Boom!' }
  #   task.execute
  #
  #   #=> 'Boom!'
  #
  # @example Last `#value` and `Dereferenceable` mixin
  #   task = Concurrent::TimerTask.new(
  #     dup_on_deref: true,
  #     execution_interval: 5
  #   ){ Time.now }
  #
  #   task.execute
  #   Time.now   #=> 2013-11-07 18:06:50 -0500
  #   sleep(10)
  #   task.value #=> 2013-11-07 18:06:55 -0500
  #
  # @example Controlling execution from within the block
  #   timer_task = Concurrent::TimerTask.new(execution_interval: 1) do |task|
  #     task.execution_interval.times{ print 'Boom! ' }
  #     print "\n"
  #     task.execution_interval += 1
  #     if task.execution_interval > 5
  #       puts 'Stopping...'
  #       task.shutdown
  #     end
  #   end
  #
  #   timer_task.execute # blocking call - this task will stop itself
  #   #=> Boom!
  #   #=> Boom! Boom!
  #   #=> Boom! Boom! Boom!
  #   #=> Boom! Boom! Boom! Boom!
  #   #=> Boom! Boom! Boom! Boom! Boom!
  #   #=> Stopping...
  #
  # @example Observation
  #   class TaskObserver
  #     def update(time, result, ex)
  #       if result
  #         print "(#{time}) Execution successfully returned #{result}\n"
  #       elsif ex.is_a?(Concurrent::TimeoutError)
  #         print "(#{time}) Execution timed out\n"
  #       else
  #         print "(#{time}) Execution failed with error #{ex}\n"
  #       end
  #     end
  #   end
  #
  #   task = Concurrent::TimerTask.new(execution_interval: 1, timeout_interval: 1){ 42 }
  #   task.add_observer(TaskObserver.new)
  #   task.execute
  #   sleep 4
  #
  #   #=> (2013-10-13 19:08:58 -0400) Execution successfully returned 42
  #   #=> (2013-10-13 19:08:59 -0400) Execution successfully returned 42
  #   #=> (2013-10-13 19:09:00 -0400) Execution successfully returned 42
  #   task.shutdown
  #
  #   task = Concurrent::TimerTask.new(execution_interval: 1, timeout_interval: 1){ sleep }
  #   task.add_observer(TaskObserver.new)
  #   task.execute
  #
  #   #=> (2013-10-13 19:07:25 -0400) Execution timed out
  #   #=> (2013-10-13 19:07:27 -0400) Execution timed out
  #   #=> (2013-10-13 19:07:29 -0400) Execution timed out
  #   task.shutdown
  #
  #   task = Concurrent::TimerTask.new(execution_interval: 1){ raise StandardError }
  #   task.add_observer(TaskObserver.new)
  #   task.execute
  #
  #   #=> (2013-10-13 19:09:37 -0400) Execution failed with error StandardError
  #   #=> (2013-10-13 19:09:38 -0400) Execution failed with error StandardError
  #   #=> (2013-10-13 19:09:39 -0400) Execution failed with error StandardError
  #   task.shutdown
  #
  # @see http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html
  # @see http://docs.oracle.com/javase/7/docs/api/java/util/TimerTask.html
  class TimerTask < RubyExecutorService
    include Concern::Dereferenceable
    include Concern::Observable

    # Default `:execution_interval` in seconds.
    EXECUTION_INTERVAL = 60

    # Default `:timeout_interval` in seconds.
    TIMEOUT_INTERVAL = 30

    # Create a new TimerTask with the given task and configuration.
    #
    # @!macro timer_task_initialize
    #   @param [Hash] opts the options defining task execution.
    #   @option opts [Integer] :execution_interval number of seconds between
    #     task executions (default: EXECUTION_INTERVAL)
    #   @option opts [Integer] :timeout_interval number of seconds a task can
    #     run before it is considered to have failed (default: TIMEOUT_INTERVAL)
    #   @option opts [Boolean] :run_now Whether to run the task immediately
    #     upon instantiation or to wait until the first execution_interval
    #     has passed (default: false)
    #
    #   @!macro deref_options
    #
    #   @raise ArgumentError when no block is given.
    #
    #   @yield to the block after :execution_interval seconds have passed since
    #     the last yield
    #   @yieldparam task a reference to the `TimerTask` instance so that the
    #     block can control its own lifecycle. Necessary since `self` will
    #     refer to the execution context of the block rather than the running
    #     `TimerTask`.
    #
    #   @return [TimerTask] the new `TimerTask`
    def initialize(opts = {}, &task)
      raise ArgumentError.new('no block given') unless block_given?
      super
      set_deref_options opts
    end

    # Is the executor running?
    #
    # @return [Boolean] `true` when running, `false` when shutting down or shutdown
    def running?
      @running.true?
    end

    # Execute a previously created `TimerTask`.
    #
    # @return [TimerTask] a reference to `self`
    #
    # @example Instantiate and execute in separate steps
    #   task = Concurrent::TimerTask.new(execution_interval: 10){ print "Hello World\n" }
    #   task.running? #=> false
    #   task.execute
    #   task.running? #=> true
    #
    # @example Instantiate and execute in one line
    #   task = Concurrent::TimerTask.new(execution_interval: 10){ print "Hello World\n" }.execute
    #   task.running? #=> true
    def execute
      synchronize do
        if @running.false?
          @running.make_true
          schedule_next_task(@run_now ? 0 : @execution_interval)
        end
      end
      self
    end

    # Create and execute a new `TimerTask`.
    #
    # @!macro timer_task_initialize
    #
    # @example
    #   task = Concurrent::TimerTask.execute(execution_interval: 10){ print "Hello World\n" }
    #   task.running? #=> true
    def self.execute(opts = {}, &task)
      TimerTask.new(opts, &task).execute
    end

    # @!attribute [rw] execution_interval
    # @return [Fixnum] Number of seconds after the task completes before the
    #   task is performed again.
    def execution_interval
      synchronize { @execution_interval }
    end

    # @!attribute [rw] execution_interval
    # @return [Fixnum] Number of seconds after the task completes before the
    #   task is performed again.
    def execution_interval=(value)
      if (value = value.to_f) <= 0.0
        raise ArgumentError.new('must be greater than zero')
      else
        synchronize { @execution_interval = value }
      end
    end

    # @!attribute [rw] timeout_interval
    # @return [Fixnum] Number of seconds the task can run before it is
    #   considered to have failed.
    def timeout_interval
      synchronize { @timeout_interval }
    end

    # @!attribute [rw] timeout_interval
    # @return [Fixnum] Number of seconds the task can run before it is
    #   considered to have failed.
    def timeout_interval=(value)
      if (value = value.to_f) <= 0.0
        raise ArgumentError.new('must be greater than zero')
      else
        synchronize { @timeout_interval = value }
      end
    end

    private :post, :<<

    private

    def ns_initialize(opts, &task)
      set_deref_options(opts)

      self.execution_interval = opts[:execution] || opts[:execution_interval] || EXECUTION_INTERVAL
      self.timeout_interval   = opts[:timeout] || opts[:timeout_interval] || TIMEOUT_INTERVAL
      @run_now  = opts[:now] || opts[:run_now]
      @executor = Concurrent::SafeTaskExecutor.new(task)
      @running  = Concurrent::AtomicBoolean.new(false)
      @value    = nil

      self.observers = Collection::CopyOnNotifyObserverSet.new
    end

    # @!visibility private
    def ns_shutdown_execution
      @running.make_false
      super
    end

    # @!visibility private
    def ns_kill_execution
      @running.make_false
      super
    end

    # @!visibility private
    def schedule_next_task(interval = execution_interval)
      ScheduledTask.execute(interval, args: [Concurrent::Event.new], &method(:execute_task))
      nil
    end

    # @!visibility private
    def execute_task(completion)
      return nil unless @running.true?
      ScheduledTask.execute(timeout_interval, args: [completion], &method(:timeout_task))
      _success, value, reason = @executor.execute(self)
      if completion.try?
        self.value = value
        schedule_next_task
        time = Time.now
        observers.notify_observers do
          [time, self.value, reason]
        end
      end
      nil
    end

    # @!visibility private
    def timeout_task(completion)
      return unless @running.true?
      if completion.try?
        self.value = value
        schedule_next_task
        observers.notify_observers(Time.now, nil, Concurrent::TimeoutError.new)
      end
    end
  end
end
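Note that ns_initialize above also accepts :execution, :timeout, and :now as shorthand for :execution_interval, :timeout_interval, and :run_now. A brief, illustrative sketch combining those aliases with the Dereferenceable #value read follows; the timings and return values shown are indicative only.

require 'concurrent'

# :execution, :timeout and :now are the shorthand aliases read by ns_initialize.
task = Concurrent::TimerTask.new(execution: 2, timeout: 1, now: true) { Time.now }
task.execute
task.execution_interval #=> 2.0 (the setter coerces the value with to_f)
task.timeout_interval   #=> 1.0
sleep 3
task.value              # result of the most recent run, via Dereferenceable#value
task.shutdown           #=> true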