concurrent-ruby 1.1.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +542 -0
- data/Gemfile +37 -0
- data/LICENSE.txt +21 -0
- data/README.md +404 -0
- data/Rakefile +307 -0
- data/ext/concurrent-ruby/ConcurrentRubyService.java +17 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java +175 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java +248 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java +93 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java +113 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java +189 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java +307 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java +31 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java +3863 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java +203 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java +342 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java +3800 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java +204 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java +291 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java +199 -0
- data/lib/concurrent-ruby/concurrent/agent.rb +587 -0
- data/lib/concurrent-ruby/concurrent/array.rb +66 -0
- data/lib/concurrent-ruby/concurrent/async.rb +449 -0
- data/lib/concurrent-ruby/concurrent/atom.rb +222 -0
- data/lib/concurrent-ruby/concurrent/atomic/abstract_thread_local_var.rb +66 -0
- data/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb +126 -0
- data/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb +143 -0
- data/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb +164 -0
- data/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb +205 -0
- data/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb +100 -0
- data/lib/concurrent-ruby/concurrent/atomic/cyclic_barrier.rb +128 -0
- data/lib/concurrent-ruby/concurrent/atomic/event.rb +109 -0
- data/lib/concurrent-ruby/concurrent/atomic/java_count_down_latch.rb +42 -0
- data/lib/concurrent-ruby/concurrent/atomic/java_thread_local_var.rb +37 -0
- data/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_boolean.rb +62 -0
- data/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb +75 -0
- data/lib/concurrent-ruby/concurrent/atomic/mutex_count_down_latch.rb +44 -0
- data/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb +131 -0
- data/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb +254 -0
- data/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb +377 -0
- data/lib/concurrent-ruby/concurrent/atomic/ruby_thread_local_var.rb +181 -0
- data/lib/concurrent-ruby/concurrent/atomic/semaphore.rb +166 -0
- data/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb +104 -0
- data/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb +56 -0
- data/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb +28 -0
- data/lib/concurrent-ruby/concurrent/atomics.rb +10 -0
- data/lib/concurrent-ruby/concurrent/collection/copy_on_notify_observer_set.rb +107 -0
- data/lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb +111 -0
- data/lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb +84 -0
- data/lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb +158 -0
- data/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb +927 -0
- data/lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb +66 -0
- data/lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb +140 -0
- data/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb +82 -0
- data/lib/concurrent-ruby/concurrent/collection/map/truffleruby_map_backend.rb +14 -0
- data/lib/concurrent-ruby/concurrent/collection/non_concurrent_priority_queue.rb +143 -0
- data/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb +160 -0
- data/lib/concurrent-ruby/concurrent/concern/deprecation.rb +34 -0
- data/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb +73 -0
- data/lib/concurrent-ruby/concurrent/concern/logging.rb +32 -0
- data/lib/concurrent-ruby/concurrent/concern/obligation.rb +220 -0
- data/lib/concurrent-ruby/concurrent/concern/observable.rb +110 -0
- data/lib/concurrent-ruby/concurrent/configuration.rb +188 -0
- data/lib/concurrent-ruby/concurrent/constants.rb +8 -0
- data/lib/concurrent-ruby/concurrent/dataflow.rb +81 -0
- data/lib/concurrent-ruby/concurrent/delay.rb +199 -0
- data/lib/concurrent-ruby/concurrent/errors.rb +69 -0
- data/lib/concurrent-ruby/concurrent/exchanger.rb +352 -0
- data/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb +131 -0
- data/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb +62 -0
- data/lib/concurrent-ruby/concurrent/executor/executor_service.rb +185 -0
- data/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb +220 -0
- data/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb +66 -0
- data/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb +44 -0
- data/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb +103 -0
- data/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb +30 -0
- data/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb +140 -0
- data/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb +82 -0
- data/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb +21 -0
- data/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb +368 -0
- data/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb +35 -0
- data/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb +34 -0
- data/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb +107 -0
- data/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb +28 -0
- data/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb +100 -0
- data/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb +57 -0
- data/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb +88 -0
- data/lib/concurrent-ruby/concurrent/executor/timer_set.rb +172 -0
- data/lib/concurrent-ruby/concurrent/executors.rb +20 -0
- data/lib/concurrent-ruby/concurrent/future.rb +141 -0
- data/lib/concurrent-ruby/concurrent/hash.rb +59 -0
- data/lib/concurrent-ruby/concurrent/immutable_struct.rb +101 -0
- data/lib/concurrent-ruby/concurrent/ivar.rb +207 -0
- data/lib/concurrent-ruby/concurrent/map.rb +346 -0
- data/lib/concurrent-ruby/concurrent/maybe.rb +229 -0
- data/lib/concurrent-ruby/concurrent/mutable_struct.rb +239 -0
- data/lib/concurrent-ruby/concurrent/mvar.rb +242 -0
- data/lib/concurrent-ruby/concurrent/options.rb +42 -0
- data/lib/concurrent-ruby/concurrent/promise.rb +580 -0
- data/lib/concurrent-ruby/concurrent/promises.rb +2167 -0
- data/lib/concurrent-ruby/concurrent/re_include.rb +58 -0
- data/lib/concurrent-ruby/concurrent/scheduled_task.rb +331 -0
- data/lib/concurrent-ruby/concurrent/set.rb +74 -0
- data/lib/concurrent-ruby/concurrent/settable_struct.rb +139 -0
- data/lib/concurrent-ruby/concurrent/synchronization/abstract_lockable_object.rb +98 -0
- data/lib/concurrent-ruby/concurrent/synchronization/abstract_object.rb +24 -0
- data/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb +171 -0
- data/lib/concurrent-ruby/concurrent/synchronization/condition.rb +60 -0
- data/lib/concurrent-ruby/concurrent/synchronization/jruby_lockable_object.rb +13 -0
- data/lib/concurrent-ruby/concurrent/synchronization/jruby_object.rb +45 -0
- data/lib/concurrent-ruby/concurrent/synchronization/lock.rb +36 -0
- data/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb +72 -0
- data/lib/concurrent-ruby/concurrent/synchronization/mri_object.rb +44 -0
- data/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb +88 -0
- data/lib/concurrent-ruby/concurrent/synchronization/object.rb +183 -0
- data/lib/concurrent-ruby/concurrent/synchronization/rbx_lockable_object.rb +71 -0
- data/lib/concurrent-ruby/concurrent/synchronization/rbx_object.rb +49 -0
- data/lib/concurrent-ruby/concurrent/synchronization/truffleruby_object.rb +47 -0
- data/lib/concurrent-ruby/concurrent/synchronization/volatile.rb +36 -0
- data/lib/concurrent-ruby/concurrent/synchronization.rb +30 -0
- data/lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb +50 -0
- data/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb +74 -0
- data/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb +118 -0
- data/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb +88 -0
- data/lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb +38 -0
- data/lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb +246 -0
- data/lib/concurrent-ruby/concurrent/thread_safe/util/volatile.rb +75 -0
- data/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb +50 -0
- data/lib/concurrent-ruby/concurrent/thread_safe/util.rb +16 -0
- data/lib/concurrent-ruby/concurrent/timer_task.rb +311 -0
- data/lib/concurrent-ruby/concurrent/tuple.rb +86 -0
- data/lib/concurrent-ruby/concurrent/tvar.rb +221 -0
- data/lib/concurrent-ruby/concurrent/utility/engine.rb +56 -0
- data/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb +90 -0
- data/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb +79 -0
- data/lib/concurrent-ruby/concurrent/utility/native_integer.rb +53 -0
- data/lib/concurrent-ruby/concurrent/utility/processor_counter.rb +130 -0
- data/lib/concurrent-ruby/concurrent/version.rb +3 -0
- data/lib/concurrent-ruby/concurrent-ruby.rb +5 -0
- data/lib/concurrent-ruby/concurrent.rb +134 -0
- metadata +192 -0
require 'thread'
require 'concurrent/atomic/atomic_reference'
require 'concurrent/errors'
require 'concurrent/synchronization'
require 'concurrent/atomic/thread_local_var'

module Concurrent

  # Re-entrant read-write lock implementation
  #
  # Allows any number of concurrent readers, but only one concurrent writer
  # (And while the "write" lock is taken, no read locks can be obtained either.
  # Hence, the write lock can also be called an "exclusive" lock.)
  #
  # If another thread has taken a read lock, any thread which wants a write lock
  # will block until all the readers release their locks. However, once a thread
  # starts waiting to obtain a write lock, any additional readers that come along
  # will also wait (so writers are not starved).
  #
  # A thread can acquire both a read and write lock at the same time. A thread can
  # also acquire a read lock OR a write lock more than once. Only when the read (or
  # write) lock is released as many times as it was acquired, will the thread
  # actually let it go, allowing other threads which might have been waiting
  # to proceed. Therefore the lock can be upgraded by first acquiring the
  # read lock and then the write lock, and the lock can be downgraded by first
  # holding both read and write locks and releasing just the write lock.
  #
  # If both read and write locks are acquired by the same thread, it is not strictly
  # necessary to release them in the same order they were acquired. In other words,
  # the following code is legal:
  #
  # @example
  #   lock = Concurrent::ReentrantReadWriteLock.new
  #   lock.acquire_write_lock
  #   lock.acquire_read_lock
  #   lock.release_write_lock
  #   # At this point, the current thread is holding only a read lock, not a write
  #   # lock. So other threads can take read locks, but not a write lock.
  #   lock.release_read_lock
  #   # Now the current thread is not holding either a read or write lock, so
  #   # another thread could potentially acquire a write lock.
  #
  # This implementation was inspired by `java.util.concurrent.ReentrantReadWriteLock`.
  #
  # @example
  #   lock = Concurrent::ReentrantReadWriteLock.new
  #   lock.with_read_lock  { data.retrieve }
  #   lock.with_write_lock { data.modify! }
  #
  # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock
  class ReentrantReadWriteLock < Synchronization::Object

    # Implementation notes:
    #
    # A goal is to make the uncontended path for both readers/writers mutex-free
    # Only if there is reader-writer or writer-writer contention, should mutexes be used
    # Otherwise, a single CAS operation is all we need to acquire/release a lock
    #
    # Internal state is represented by a single integer ("counter"), and updated
    # using atomic compare-and-swap operations
    # When the counter is 0, the lock is free
    # Each thread which has one OR MORE read locks increments the counter by 1
    # (and decrements by 1 when releasing the read lock)
    # The counter is increased by (1 << 15) for each writer waiting to acquire the
    # write lock, and by (1 << 29) if the write lock is taken
    #
    # Additionally, each thread uses a thread-local variable to count how many times
    # it has acquired a read lock, AND how many times it has acquired a write lock.
    # It uses a similar trick; an increment of 1 means a read lock was taken, and
    # an increment of (1 << 15) means a write lock was taken
    # This is what makes re-entrancy possible
    #
    # 2 rules are followed to ensure good liveness properties:
    # 1) Once a writer has queued up and is waiting for a write lock, no other thread
    #    can take a lock without waiting
    # 2) When a write lock is released, readers are given the "first chance" to wake
    #    up and acquire a read lock
    # Following these rules means readers and writers tend to "take turns", so neither
    # can starve the other, even under heavy contention

    # @!visibility private
    READER_BITS = 15
    # @!visibility private
    WRITER_BITS = 14

    # Used with @Counter:
    # @!visibility private
    WAITING_WRITER  = 1 << READER_BITS
    # @!visibility private
    RUNNING_WRITER  = 1 << (READER_BITS + WRITER_BITS)
    # @!visibility private
    MAX_READERS     = WAITING_WRITER - 1
    # @!visibility private
    MAX_WRITERS     = RUNNING_WRITER - MAX_READERS - 1

    # Used with @HeldCount:
    # @!visibility private
    WRITE_LOCK_HELD = 1 << READER_BITS
    # @!visibility private
    READ_LOCK_MASK  = WRITE_LOCK_HELD - 1
    # @!visibility private
    WRITE_LOCK_MASK = MAX_WRITERS

    safe_initialization!

    # Create a new `ReentrantReadWriteLock` in the unlocked state.
    def initialize
      super()
      @Counter    = AtomicFixnum.new(0)       # single integer which represents lock state
      @ReadQueue  = Synchronization::Lock.new # used to queue waiting readers
      @WriteQueue = Synchronization::Lock.new # used to queue waiting writers
      @HeldCount  = ThreadLocalVar.new(0)     # indicates # of R & W locks held by this thread
    end

    # Execute a block operation within a read lock.
    #
    # @yield the task to be performed within the lock.
    #
    # @return [Object] the result of the block operation.
    #
    # @raise [ArgumentError] when no block is given.
    # @raise [Concurrent::ResourceLimitError] if the maximum number of readers
    #   is exceeded.
    def with_read_lock
      raise ArgumentError.new('no block given') unless block_given?
      acquire_read_lock
      begin
        yield
      ensure
        release_read_lock
      end
    end

    # Execute a block operation within a write lock.
    #
    # @yield the task to be performed within the lock.
    #
    # @return [Object] the result of the block operation.
    #
    # @raise [ArgumentError] when no block is given.
    # @raise [Concurrent::ResourceLimitError] if the maximum number of readers
    #   is exceeded.
    def with_write_lock
      raise ArgumentError.new('no block given') unless block_given?
      acquire_write_lock
      begin
        yield
      ensure
        release_write_lock
      end
    end

    # Acquire a read lock. If a write lock is held by another thread, will block
    # until it is released.
    #
    # @return [Boolean] true if the lock is successfully acquired
    #
    # @raise [Concurrent::ResourceLimitError] if the maximum number of readers
    #   is exceeded.
    def acquire_read_lock
      if (held = @HeldCount.value) > 0
        # If we already have a lock, there's no need to wait
        if held & READ_LOCK_MASK == 0
          # But we do need to update the counter, if we were holding a write
          # lock but not a read lock
          @Counter.update { |c| c + 1 }
        end
        @HeldCount.value = held + 1
        return true
      end

      while true
        c = @Counter.value
        raise ResourceLimitError.new('Too many reader threads') if max_readers?(c)

        # If a writer is waiting OR running when we first queue up, we need to wait
        if waiting_or_running_writer?(c)
          # Before going to sleep, check again with the ReadQueue mutex held
          @ReadQueue.synchronize do
            @ReadQueue.ns_wait if waiting_or_running_writer?
          end
          # Note: the above 'synchronize' block could have used #wait_until,
          # but that waits repeatedly in a loop, checking the wait condition
          # each time it wakes up (to protect against spurious wakeups)
          # But we are already in a loop, which is only broken when we successfully
          # acquire the lock! So we don't care about spurious wakeups, and would
          # rather not pay the extra overhead of using #wait_until

          # After a reader has waited once, they are allowed to "barge" ahead of waiting writers
          # But if a writer is *running*, the reader still needs to wait (naturally)
          while true
            c = @Counter.value
            if running_writer?(c)
              @ReadQueue.synchronize do
                @ReadQueue.ns_wait if running_writer?
              end
            elsif @Counter.compare_and_set(c, c+1)
              @HeldCount.value = held + 1
              return true
            end
          end
        elsif @Counter.compare_and_set(c, c+1)
          @HeldCount.value = held + 1
          return true
        end
      end
    end

    # Try to acquire a read lock and return true if we succeed. If it cannot be
    # acquired immediately, return false.
    #
    # @return [Boolean] true if the lock is successfully acquired
    def try_read_lock
      if (held = @HeldCount.value) > 0
        if held & READ_LOCK_MASK == 0
          # If we hold a write lock, but not a read lock...
          @Counter.update { |c| c + 1 }
        end
        @HeldCount.value = held + 1
        return true
      else
        c = @Counter.value
        if !waiting_or_running_writer?(c) && @Counter.compare_and_set(c, c+1)
          @HeldCount.value = held + 1
          return true
        end
      end
      false
    end

    # Release a previously acquired read lock.
    #
    # @return [Boolean] true if the lock is successfully released
    def release_read_lock
      held = @HeldCount.value = @HeldCount.value - 1
      rlocks_held = held & READ_LOCK_MASK
      if rlocks_held == 0
        c = @Counter.update { |counter| counter - 1 }
        # If one or more writers were waiting, and we were the last reader, wake a writer up
        if waiting_or_running_writer?(c) && running_readers(c) == 0
          @WriteQueue.signal
        end
      elsif rlocks_held == READ_LOCK_MASK
        # The decrement above wrapped below zero: nothing was held
        raise IllegalOperationError, "Cannot release a read lock which is not held"
      end
      true
    end

    # Acquire a write lock. Will block and wait for all active readers and writers.
    #
    # @return [Boolean] true if the lock is successfully acquired
    #
    # @raise [Concurrent::ResourceLimitError] if the maximum number of writers
    #   is exceeded.
    def acquire_write_lock
      if (held = @HeldCount.value) >= WRITE_LOCK_HELD
        # if we already have a write (exclusive) lock, there's no need to wait
        @HeldCount.value = held + WRITE_LOCK_HELD
        return true
      end

      while true
        c = @Counter.value
        raise ResourceLimitError.new('Too many writer threads') if max_writers?(c)

        # To go ahead and take the lock without waiting, there must be no writer
        # running right now, AND no writers who came before us still waiting to
        # acquire the lock
        # Additionally, if any read locks have been taken, we must hold all of them
        if held > 0 && @Counter.compare_and_set(1, 1+RUNNING_WRITER)
          # If we are the only reader and successfully swap the RUNNING_WRITER bit on,
          # then we can go ahead
          # BUGFIX: the new value must be computed from the compared-against value 1,
          # not from the snapshot in `c`. The CAS only proves the counter is 1 *now*;
          # `c` may be a stale read from the top of the loop, and writing
          # `c+RUNNING_WRITER` on success could fabricate phantom readers/waiting
          # writers and permanently corrupt the lock state.
          @HeldCount.value = held + WRITE_LOCK_HELD
          return true
        elsif @Counter.compare_and_set(c, c+WAITING_WRITER)
          while true
            # Now we have successfully incremented, so no more readers will be able to increment
            # (they will wait instead)
            # However, readers OR writers could decrement right here
            @WriteQueue.synchronize do
              # So we have to do another check inside the synchronized section
              # If a writer OR another reader is running, then go to sleep
              c = @Counter.value
              @WriteQueue.ns_wait if running_writer?(c) || running_readers(c) != held
            end
            # Note: if you are thinking of replacing the above 'synchronize' block
            # with #wait_until, read the comment in #acquire_read_lock first!

            # We just came out of a wait
            # If we successfully turn the RUNNING_WRITER bit on with an atomic swap,
            # then we are OK to stop waiting and go ahead
            # Otherwise go back and wait again
            c = @Counter.value
            if !running_writer?(c) &&
               running_readers(c) == held &&
               @Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER)
              @HeldCount.value = held + WRITE_LOCK_HELD
              return true
            end
          end
        end
      end
    end

    # Try to acquire a write lock and return true if we succeed. If it cannot be
    # acquired immediately, return false.
    #
    # @return [Boolean] true if the lock is successfully acquired
    def try_write_lock
      if (held = @HeldCount.value) >= WRITE_LOCK_HELD
        @HeldCount.value = held + WRITE_LOCK_HELD
        return true
      else
        c = @Counter.value
        if !waiting_or_running_writer?(c) &&
           running_readers(c) == held &&
           @Counter.compare_and_set(c, c+RUNNING_WRITER)
          @HeldCount.value = held + WRITE_LOCK_HELD
          return true
        end
      end
      false
    end

    # Release a previously acquired write lock.
    #
    # @return [Boolean] true if the lock is successfully released
    def release_write_lock
      held = @HeldCount.value = @HeldCount.value - WRITE_LOCK_HELD
      wlocks_held = held & WRITE_LOCK_MASK
      if wlocks_held == 0
        c = @Counter.update { |counter| counter - RUNNING_WRITER }
        # Readers are woken first ("first chance" rule, see implementation notes)
        @ReadQueue.broadcast
        @WriteQueue.signal if waiting_writers(c) > 0
      elsif wlocks_held == WRITE_LOCK_MASK
        # The decrement above wrapped: no write lock was held
        raise IllegalOperationError, "Cannot release a write lock which is not held"
      end
      true
    end

    private

    # @!visibility private
    def running_readers(c = @Counter.value)
      c & MAX_READERS
    end

    # @!visibility private
    def running_readers?(c = @Counter.value)
      (c & MAX_READERS) > 0
    end

    # @!visibility private
    def running_writer?(c = @Counter.value)
      c >= RUNNING_WRITER
    end

    # @!visibility private
    def waiting_writers(c = @Counter.value)
      (c & MAX_WRITERS) >> READER_BITS
    end

    # @!visibility private
    def waiting_or_running_writer?(c = @Counter.value)
      c >= WAITING_WRITER
    end

    # @!visibility private
    def max_readers?(c = @Counter.value)
      (c & MAX_READERS) == MAX_READERS
    end

    # @!visibility private
    def max_writers?(c = @Counter.value)
      (c & MAX_WRITERS) == MAX_WRITERS
    end
  end
end
require 'thread'
require 'concurrent/atomic/abstract_thread_local_var'

module Concurrent

  # @!visibility private
  # @!macro internal_implementation_note
  class RubyThreadLocalVar < AbstractThreadLocalVar

    # Each thread has a (lazily initialized) array of thread-local variable values
    # Each time a new thread-local var is created, we allocate an "index" for it
    # For example, if the allocated index is 1, that means slot #1 in EVERY
    # thread's thread-local array will be used for the value of that TLV
    #
    # The good thing about using a per-THREAD structure to hold values, rather
    # than a per-TLV structure, is that no synchronization is needed when
    # reading and writing those values (since the structure is only ever
    # accessed by a single thread)
    #
    # Of course, when a TLV is GC'd, 1) we need to recover its index for use
    # by other new TLVs (otherwise the thread-local arrays could get bigger
    # and bigger with time), and 2) we need to null out all the references
    # held in the now-unused slots (both to avoid blocking GC of those objects,
    # and also to prevent "stale" values from being passed on to a new TLV
    # when the index is reused)
    # Because we need to null out freed slots, we need to keep references to
    # ALL the thread-local arrays -- ARRAYS is for that
    # But when a Thread is GC'd, we need to drop the reference to its thread-local
    # array, so we don't leak memory

    FREE = []
    LOCK = Mutex.new
    THREAD_LOCAL_ARRAYS = {} # used as a hash set

    # synchronize when not on MRI
    # on MRI, taking a lock inside a finalizer raises "can't be called from trap
    # context", so the code is carefully written to be thread-safe on MRI by
    # relying on the GIL instead
    if Concurrent.on_cruby?
      # @!visibility private
      def self.semi_sync(&block)
        block.call
      end
    else
      # @!visibility private
      def self.semi_sync(&block)
        LOCK.synchronize(&block)
      end
    end

    private_constant :FREE, :LOCK, :THREAD_LOCAL_ARRAYS

    # @!macro thread_local_var_method_get
    def value
      slots = get_threadlocal_array
      return default unless slots

      stored = slots[@index]
      if stored.nil?
        default               # slot never written by this thread
      elsif stored.equal?(NULL)
        nil                   # NULL sentinel marks an explicitly-stored nil
      else
        stored
      end
    end

    # @!macro thread_local_var_method_set
    def value=(value)
      current_thread = Thread.current
      # We could keep the thread-local arrays in a hash, keyed by Thread,
      # but that would require locking; Ruby's built-in thread-local storage
      # is faster
      slots = get_threadlocal_array(current_thread)
      unless slots
        slots = set_threadlocal_array([], current_thread)
        self.class.semi_sync { THREAD_LOCAL_ARRAYS[slots.object_id] = slots }
        ObjectSpace.define_finalizer(current_thread, self.class.thread_finalizer(slots.object_id))
      end
      slots[@index] = value.nil? ? NULL : value
      value
    end

    protected

    # Claim a slot index for this TLV (recycling a freed one when possible)
    # and register a finalizer to reclaim it on GC.
    # @!visibility private
    def allocate_storage
      @index = FREE.pop || next_index

      ObjectSpace.define_finalizer(self, self.class.thread_local_finalizer(@index))
    end

    # Finalizer run when a TLV instance is GC'd: clears its slot in every
    # thread-local array, then recycles the index.
    # @!visibility private
    def self.thread_local_finalizer(index)
      proc do
        semi_sync do
          # The cost of GC'ing a TLV is linear in the number of threads using TLVs
          # But that is natural! More threads means more storage is used per TLV
          # So naturally more CPU time is required to free more storage
          #
          # DO NOT use each_value which might conflict with new pair assignment
          # into the hash in #value= method
          THREAD_LOCAL_ARRAYS.values.each { |slots| slots[index] = nil }
          # free index has to be published after the arrays are cleared
          FREE.push(index)
        end
      end
    end

    # Finalizer run when a Thread is GC'd: drops our reference to its
    # thread-local array so it can be collected too.
    # @!visibility private
    def self.thread_finalizer(id)
      proc do
        semi_sync do
          # The thread which used this thread-local array is now gone
          # So don't hold onto a reference to the array (thus blocking GC)
          THREAD_LOCAL_ARRAYS.delete(id)
        end
      end
    end

    private

    # noinspection RubyClassVariableUsageInspection
    @@next = 0
    # Hand out a fresh, never-used slot index under the lock.
    # noinspection RubyClassVariableUsageInspection
    def next_index
      LOCK.synchronize do
        allocated = @@next
        @@next += 1
        allocated
      end
    end

    if Thread.instance_methods.include?(:thread_variable_get)

      def get_threadlocal_array(thread = Thread.current)
        thread.thread_variable_get(:__threadlocal_array__)
      end

      def set_threadlocal_array(array, thread = Thread.current)
        thread.thread_variable_set(:__threadlocal_array__, array)
      end

    else

      # Older Rubies: fall back to fiber-local storage via Thread#[]
      def get_threadlocal_array(thread = Thread.current)
        thread[:__threadlocal_array__]
      end

      def set_threadlocal_array(array, thread = Thread.current)
        thread[:__threadlocal_array__] = array
      end
    end

    # This exists only for use in testing
    # @!visibility private
    def value_for(thread)
      slots = get_threadlocal_array(thread)
      return get_default unless slots

      stored = slots[@index]
      if stored.nil?
        get_default
      elsif stored.equal?(NULL)
        nil
      else
        stored
      end
    end

    # @!visibility private
    def get_default
      if @default_block
        raise "Cannot use default_for with default block"
      else
        @default
      end
    end
  end
end