concurrent-ruby 1.1.5

Files changed (143)
  1. checksums.yaml +7 -0
  2. data/CHANGELOG.md +478 -0
  3. data/Gemfile +41 -0
  4. data/LICENSE.md +23 -0
  5. data/README.md +381 -0
  6. data/Rakefile +327 -0
  7. data/ext/concurrent-ruby/ConcurrentRubyService.java +17 -0
  8. data/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java +175 -0
  9. data/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java +248 -0
  10. data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java +93 -0
  11. data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java +113 -0
  12. data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java +159 -0
  13. data/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java +307 -0
  14. data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java +31 -0
  15. data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java +3863 -0
  16. data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java +203 -0
  17. data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java +342 -0
  18. data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java +3800 -0
  19. data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java +204 -0
  20. data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java +291 -0
  21. data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java +199 -0
  22. data/lib/concurrent-ruby.rb +1 -0
  23. data/lib/concurrent.rb +134 -0
  24. data/lib/concurrent/agent.rb +587 -0
  25. data/lib/concurrent/array.rb +66 -0
  26. data/lib/concurrent/async.rb +459 -0
  27. data/lib/concurrent/atom.rb +222 -0
  28. data/lib/concurrent/atomic/abstract_thread_local_var.rb +66 -0
  29. data/lib/concurrent/atomic/atomic_boolean.rb +126 -0
  30. data/lib/concurrent/atomic/atomic_fixnum.rb +143 -0
  31. data/lib/concurrent/atomic/atomic_markable_reference.rb +164 -0
  32. data/lib/concurrent/atomic/atomic_reference.rb +204 -0
  33. data/lib/concurrent/atomic/count_down_latch.rb +100 -0
  34. data/lib/concurrent/atomic/cyclic_barrier.rb +128 -0
  35. data/lib/concurrent/atomic/event.rb +109 -0
  36. data/lib/concurrent/atomic/java_count_down_latch.rb +42 -0
  37. data/lib/concurrent/atomic/java_thread_local_var.rb +37 -0
  38. data/lib/concurrent/atomic/mutex_atomic_boolean.rb +62 -0
  39. data/lib/concurrent/atomic/mutex_atomic_fixnum.rb +75 -0
  40. data/lib/concurrent/atomic/mutex_count_down_latch.rb +44 -0
  41. data/lib/concurrent/atomic/mutex_semaphore.rb +115 -0
  42. data/lib/concurrent/atomic/read_write_lock.rb +254 -0
  43. data/lib/concurrent/atomic/reentrant_read_write_lock.rb +379 -0
  44. data/lib/concurrent/atomic/ruby_thread_local_var.rb +161 -0
  45. data/lib/concurrent/atomic/semaphore.rb +145 -0
  46. data/lib/concurrent/atomic/thread_local_var.rb +104 -0
  47. data/lib/concurrent/atomic_reference/mutex_atomic.rb +56 -0
  48. data/lib/concurrent/atomic_reference/numeric_cas_wrapper.rb +28 -0
  49. data/lib/concurrent/atomics.rb +10 -0
  50. data/lib/concurrent/collection/copy_on_notify_observer_set.rb +107 -0
  51. data/lib/concurrent/collection/copy_on_write_observer_set.rb +111 -0
  52. data/lib/concurrent/collection/java_non_concurrent_priority_queue.rb +84 -0
  53. data/lib/concurrent/collection/lock_free_stack.rb +158 -0
  54. data/lib/concurrent/collection/map/atomic_reference_map_backend.rb +927 -0
  55. data/lib/concurrent/collection/map/mri_map_backend.rb +66 -0
  56. data/lib/concurrent/collection/map/non_concurrent_map_backend.rb +140 -0
  57. data/lib/concurrent/collection/map/synchronized_map_backend.rb +82 -0
  58. data/lib/concurrent/collection/non_concurrent_priority_queue.rb +143 -0
  59. data/lib/concurrent/collection/ruby_non_concurrent_priority_queue.rb +150 -0
  60. data/lib/concurrent/concern/deprecation.rb +34 -0
  61. data/lib/concurrent/concern/dereferenceable.rb +73 -0
  62. data/lib/concurrent/concern/logging.rb +32 -0
  63. data/lib/concurrent/concern/obligation.rb +220 -0
  64. data/lib/concurrent/concern/observable.rb +110 -0
  65. data/lib/concurrent/concurrent_ruby.jar +0 -0
  66. data/lib/concurrent/configuration.rb +184 -0
  67. data/lib/concurrent/constants.rb +8 -0
  68. data/lib/concurrent/dataflow.rb +81 -0
  69. data/lib/concurrent/delay.rb +199 -0
  70. data/lib/concurrent/errors.rb +69 -0
  71. data/lib/concurrent/exchanger.rb +352 -0
  72. data/lib/concurrent/executor/abstract_executor_service.rb +134 -0
  73. data/lib/concurrent/executor/cached_thread_pool.rb +62 -0
  74. data/lib/concurrent/executor/executor_service.rb +185 -0
  75. data/lib/concurrent/executor/fixed_thread_pool.rb +206 -0
  76. data/lib/concurrent/executor/immediate_executor.rb +66 -0
  77. data/lib/concurrent/executor/indirect_immediate_executor.rb +44 -0
  78. data/lib/concurrent/executor/java_executor_service.rb +91 -0
  79. data/lib/concurrent/executor/java_single_thread_executor.rb +29 -0
  80. data/lib/concurrent/executor/java_thread_pool_executor.rb +123 -0
  81. data/lib/concurrent/executor/ruby_executor_service.rb +78 -0
  82. data/lib/concurrent/executor/ruby_single_thread_executor.rb +22 -0
  83. data/lib/concurrent/executor/ruby_thread_pool_executor.rb +362 -0
  84. data/lib/concurrent/executor/safe_task_executor.rb +35 -0
  85. data/lib/concurrent/executor/serial_executor_service.rb +34 -0
  86. data/lib/concurrent/executor/serialized_execution.rb +107 -0
  87. data/lib/concurrent/executor/serialized_execution_delegator.rb +28 -0
  88. data/lib/concurrent/executor/simple_executor_service.rb +100 -0
  89. data/lib/concurrent/executor/single_thread_executor.rb +56 -0
  90. data/lib/concurrent/executor/thread_pool_executor.rb +87 -0
  91. data/lib/concurrent/executor/timer_set.rb +173 -0
  92. data/lib/concurrent/executors.rb +20 -0
  93. data/lib/concurrent/future.rb +141 -0
  94. data/lib/concurrent/hash.rb +59 -0
  95. data/lib/concurrent/immutable_struct.rb +93 -0
  96. data/lib/concurrent/ivar.rb +207 -0
  97. data/lib/concurrent/map.rb +337 -0
  98. data/lib/concurrent/maybe.rb +229 -0
  99. data/lib/concurrent/mutable_struct.rb +229 -0
  100. data/lib/concurrent/mvar.rb +242 -0
  101. data/lib/concurrent/options.rb +42 -0
  102. data/lib/concurrent/promise.rb +579 -0
  103. data/lib/concurrent/promises.rb +2167 -0
  104. data/lib/concurrent/re_include.rb +58 -0
  105. data/lib/concurrent/scheduled_task.rb +318 -0
  106. data/lib/concurrent/set.rb +66 -0
  107. data/lib/concurrent/settable_struct.rb +129 -0
  108. data/lib/concurrent/synchronization.rb +30 -0
  109. data/lib/concurrent/synchronization/abstract_lockable_object.rb +98 -0
  110. data/lib/concurrent/synchronization/abstract_object.rb +24 -0
  111. data/lib/concurrent/synchronization/abstract_struct.rb +160 -0
  112. data/lib/concurrent/synchronization/condition.rb +60 -0
  113. data/lib/concurrent/synchronization/jruby_lockable_object.rb +13 -0
  114. data/lib/concurrent/synchronization/jruby_object.rb +45 -0
  115. data/lib/concurrent/synchronization/lock.rb +36 -0
  116. data/lib/concurrent/synchronization/lockable_object.rb +74 -0
  117. data/lib/concurrent/synchronization/mri_object.rb +44 -0
  118. data/lib/concurrent/synchronization/mutex_lockable_object.rb +76 -0
  119. data/lib/concurrent/synchronization/object.rb +183 -0
  120. data/lib/concurrent/synchronization/rbx_lockable_object.rb +65 -0
  121. data/lib/concurrent/synchronization/rbx_object.rb +49 -0
  122. data/lib/concurrent/synchronization/truffleruby_object.rb +47 -0
  123. data/lib/concurrent/synchronization/volatile.rb +36 -0
  124. data/lib/concurrent/thread_safe/synchronized_delegator.rb +50 -0
  125. data/lib/concurrent/thread_safe/util.rb +16 -0
  126. data/lib/concurrent/thread_safe/util/adder.rb +74 -0
  127. data/lib/concurrent/thread_safe/util/cheap_lockable.rb +118 -0
  128. data/lib/concurrent/thread_safe/util/data_structures.rb +63 -0
  129. data/lib/concurrent/thread_safe/util/power_of_two_tuple.rb +38 -0
  130. data/lib/concurrent/thread_safe/util/striped64.rb +246 -0
  131. data/lib/concurrent/thread_safe/util/volatile.rb +75 -0
  132. data/lib/concurrent/thread_safe/util/xor_shift_random.rb +50 -0
  133. data/lib/concurrent/timer_task.rb +334 -0
  134. data/lib/concurrent/tuple.rb +86 -0
  135. data/lib/concurrent/tvar.rb +258 -0
  136. data/lib/concurrent/utility/at_exit.rb +97 -0
  137. data/lib/concurrent/utility/engine.rb +56 -0
  138. data/lib/concurrent/utility/monotonic_time.rb +58 -0
  139. data/lib/concurrent/utility/native_extension_loader.rb +79 -0
  140. data/lib/concurrent/utility/native_integer.rb +53 -0
  141. data/lib/concurrent/utility/processor_counter.rb +158 -0
  142. data/lib/concurrent/version.rb +3 -0
  143. metadata +193 -0
data/lib/concurrent/collection/map/atomic_reference_map_backend.rb
@@ -0,0 +1,927 @@
+ require 'concurrent/constants'
+ require 'concurrent/thread_safe/util'
+ require 'concurrent/thread_safe/util/adder'
+ require 'concurrent/thread_safe/util/cheap_lockable'
+ require 'concurrent/thread_safe/util/power_of_two_tuple'
+ require 'concurrent/thread_safe/util/volatile'
+ require 'concurrent/thread_safe/util/xor_shift_random'
+
+ module Concurrent
+
+   # @!visibility private
+   module Collection
+
+     # A Ruby port of Doug Lea's jsr166e.ConcurrentHashMapV8 class version 1.59
+     # available in public domain.
+     #
+     # Original source code available here:
+     # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ConcurrentHashMapV8.java?revision=1.59
+     #
+     # The Ruby port omits the +TreeBin+ optimization (red-black trees for use
+     # in bins whose size exceeds a threshold).
+     #
+     # A hash table supporting full concurrency of retrievals and high expected
+     # concurrency for updates. However, even though all operations are
+     # thread-safe, retrieval operations do _not_ entail locking, and there is
+     # _not_ any support for locking the entire table in a way that prevents all
+     # access.
+     #
+     # Retrieval operations generally do not block, so may overlap with update
+     # operations. Retrievals reflect the results of the most recently _completed_
+     # update operations holding upon their onset. (More formally, an update
+     # operation for a given key bears a _happens-before_ relation with any (non
+     # +nil+) retrieval for that key reporting the updated value.) For aggregate
+     # operations such as +clear()+, concurrent retrievals may reflect insertion or
+     # removal of only some entries. Similarly, the +each_pair+ iterator yields
+     # elements reflecting the state of the hash table at some point at or since
+     # the start of the +each_pair+. Bear in mind that the results of aggregate
+     # status methods including +size()+ and +empty?+ are typically useful only
+     # when a map is not undergoing concurrent updates in other threads. Otherwise
+     # the results of these methods reflect transient states that may be adequate
+     # for monitoring or estimation purposes, but not for program control.
+     #
+     # The table is dynamically expanded when there are too many collisions (i.e.,
+     # keys that have distinct hash codes but fall into the same slot modulo the
+     # table size), with the expected average effect of maintaining roughly two
+     # bins per mapping (corresponding to a 0.75 load factor threshold for
+     # resizing). There may be much variance around this average as mappings are
+     # added and removed, but overall, this maintains a commonly accepted
+     # time/space tradeoff for hash tables. However, resizing this or any other
+     # kind of hash table may be a relatively slow operation. When possible, it is
+     # a good idea to provide a size estimate as an optional :initial_capacity
+     # initializer argument. An additional optional :load_factor constructor
+     # argument provides a further means of customizing initial table capacity by
+     # specifying the table density to be used in calculating the amount of space
+     # to allocate for the given number of elements. Note that using many keys with
+     # exactly the same +hash+ is a sure way to slow down performance of any hash
+     # table.
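+     #
+     # For example (an illustrative sketch, not part of the original file;
+     # assumes the backend class has been loaded):
+     #
+     #     map = Concurrent::Collection::AtomicReferenceMapBackend.new(initial_capacity: 256)
+     #     map[:config] = {timeout: 5}
+     #     map[:config] # => {timeout: 5}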
+     #
+     # ## Design overview
+     #
+     # The primary design goal of this hash table is to maintain concurrent
+     # readability (typically method +[]+, but also iteration and related methods)
+     # while minimizing update contention. Secondary goals are to keep space
+     # consumption about the same or better than plain +Hash+, and to support high
+     # initial insertion rates on an empty table by many threads.
+     #
+     # Each key-value mapping is held in a +Node+. The validation-based approach
+     # explained below leads to a lot of code sprawl because retry-control
+     # precludes factoring into smaller methods.
+     #
+     # The table is lazily initialized to a power-of-two size upon the first
+     # insertion. Each bin in the table normally contains a list of +Node+s (most
+     # often, the list has only zero or one +Node+). Table accesses require
+     # volatile/atomic reads, writes, and CASes. The lists of nodes within bins are
+     # always accurately traversable under volatile reads, so long as lookups check
+     # hash code and non-nullness of value before checking key equality.
+     #
+     # We use the top two bits of +Node+ hash fields for control purposes -- they
+     # are available anyway because of addressing constraints. As explained further
+     # below, these top bits are used as follows:
+     #
+     # - 00 - Normal
+     # - 01 - Locked
+     # - 11 - Locked and may have a thread waiting for lock
+     # - 10 - +Node+ is a forwarding node
+     #
+     # The lower 28 bits of each +Node+'s hash field contain the key's hash code,
+     # except for forwarding nodes, for which the lower bits are zero (and so
+     # always have hash field == +MOVED+).
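+     #
+     # Concretely (an illustrative sketch, assuming the gem's
+     # +FIXNUM_BIT_SIZE+ is 62 on a 64-bit VM, so +bit_shift+ in +Node+ below
+     # is 60; on a 32-bit VM it is 28, matching the "28 bits" above):
+     #
+     #     bit_shift = 60
+     #     MOVED     = ('10' << ('0' * bit_shift)).to_i(2) # 1 << 61
+     #     LOCKED    = ('01' << ('0' * bit_shift)).to_i(2) # 1 << 60
+     #     WAITING   = ('11' << ('0' * bit_shift)).to_i(2) # MOVED | LOCKED
+     #     HASH_BITS = ('00' << ('1' * bit_shift)).to_i(2) # mask of the low 60 bits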
+     #
+     # Insertion (via +[]=+ or its variants) of the first node in an empty bin is
+     # performed by just CASing it to the bin. This is by far the most common case
+     # for put operations under most key/hash distributions. Other update
+     # operations (insert, delete, and replace) require locks. We do not want to
+     # waste the space required to associate a distinct lock object with each bin,
+     # so instead use the first node of a bin list itself as a lock. Blocking
+     # support for these locks relies on +Concurrent::ThreadSafe::Util::CheapLockable+. However, we also need a
+     # +try_lock+ construction, so we overlay these by using bits of the +Node+
+     # hash field for lock control (see above), and so normally use builtin
+     # monitors only for blocking and signalling using
+     # +cheap_wait+/+cheap_broadcast+ constructions. See +Node#try_await_lock+.
+     #
+     # Using the first node of a list as a lock does not by itself suffice though:
+     # When a node is locked, any update must first validate that it is still the
+     # first node after locking it, and retry if not. Because new nodes are always
+     # appended to lists, once a node is first in a bin, it remains first until
+     # deleted or the bin becomes invalidated (upon resizing). However, operations
+     # that only conditionally update may inspect nodes until the point of update.
+     # This is a converse of sorts to the lazy locking technique described by
+     # Herlihy & Shavit.
+     #
+     # The main disadvantage of per-bin locks is that other update operations on
+     # other nodes in a bin list protected by the same lock can stall, for example
+     # when user +eql?+ or mapping functions take a long time. However,
+     # statistically, under random hash codes, this is not a common problem.
+     # Ideally, the frequency of nodes in bins follows a Poisson distribution
+     # (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of
+     # about 0.5 on average, given the resizing threshold of 0.75, although with a
+     # large variance because of resizing granularity. Ignoring variance, the
+     # expected occurrences of list size k are (exp(-0.5) * pow(0.5, k) /
+     # factorial(k)). The first values are:
+     #
+     # - 0: 0.60653066
+     # - 1: 0.30326533
+     # - 2: 0.07581633
+     # - 3: 0.01263606
+     # - 4: 0.00157952
+     # - 5: 0.00015795
+     # - 6: 0.00001316
+     # - 7: 0.00000094
+     # - 8: 0.00000006
+     # - more: less than 1 in ten million
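+     #
+     # These values can be reproduced directly (an illustrative sketch):
+     #
+     #     fact = ->(n) { (1..n).reduce(1, :*) }
+     #     (0..8).map { |k| Math.exp(-0.5) * 0.5**k / fact.(k) }
+     #     # => [0.60653066..., 0.30326533..., 0.07581633..., ...]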
+     #
+     # Lock contention probability for two threads accessing distinct elements is
+     # roughly 1 / (8 * #elements) under random hashes.
+     #
+     # The table is resized when occupancy exceeds a percentage threshold
+     # (nominally, 0.75, but see below). Only a single thread performs the resize
+     # (using field +size_control+, to arrange exclusion), but the table otherwise
+     # remains usable for reads and updates. Resizing proceeds by transferring
+     # bins, one by one, from the table to the next table. Because we are using
+     # power-of-two expansion, the elements from each bin must either stay at same
+     # index, or move with a power of two offset. We eliminate unnecessary node
+     # creation by catching cases where old nodes can be reused because their next
+     # fields won't change. On average, only about one-sixth of them need cloning
+     # when a table doubles. The nodes they replace will be garbage collectable as
+     # soon as they are no longer referenced by any reader thread that may be in
+     # the midst of concurrently traversing the table. Upon transfer, the old table bin
+     # contains only a special forwarding node (with hash field +MOVED+) that
+     # contains the next table as its key. On encountering a forwarding node,
+     # access and update operations restart, using the new table.
+     #
+     # Each bin transfer requires its bin lock. However, unlike other cases, a
+     # transfer can skip a bin if it fails to acquire its lock, and revisit it
+     # later. Method +rebuild+ maintains a buffer of TRANSFER_BUFFER_SIZE bins that
+     # have been skipped because of failure to acquire a lock, and blocks only if
+     # none are available (i.e., only very rarely). The transfer operation must
+     # also ensure that all accessible bins in both the old and new table are
+     # usable by any traversal. When there are no lock acquisition failures, this
+     # is arranged simply by proceeding from the last bin (+table.size - 1+) up
+     # towards the first. Upon seeing a forwarding node, traversals arrange to move
+     # to the new table without revisiting nodes. However, when any node is skipped
+     # during a transfer, all earlier table bins may have become visible, so are
+     # initialized with a reverse-forwarding node back to the old table until the
+     # new ones are established. (This sometimes requires transiently locking a
+     # forwarding node, which is possible under the above encoding.) These more
+     # expensive mechanics trigger only when necessary.
+     #
+     # The traversal scheme also applies to partial traversals of
+     # ranges of bins (via an alternate Traverser constructor)
+     # to support partitioned aggregate operations. Also, read-only
+     # operations give up if ever forwarded to a null table, which
+     # provides support for shutdown-style clearing, which is also not
+     # currently implemented.
+     #
+     # Lazy table initialization minimizes footprint until first use.
+     #
+     # The element count is maintained using a +Concurrent::ThreadSafe::Util::Adder+,
+     # which avoids contention on updates but can encounter cache thrashing
+     # if read too frequently during concurrent access. To avoid reading so
+     # often, resizing is attempted either when a bin lock is
+     # contended, or upon adding to a bin already holding two or more
+     # nodes (checked before adding in the +x_if_absent+ methods, after
+     # adding in others). Under uniform hash distributions, the
+     # probability of this occurring at threshold is around 13%,
+     # meaning that only about 1 in 8 puts check threshold (and after
+     # resizing, many fewer do so). But this approximation has high
+     # variance for small table sizes, so we check on any collision
+     # for sizes <= 64. The bulk putAll operation further reduces
+     # contention by only committing count updates upon these size
+     # checks.
+     #
+     # @!visibility private
+     class AtomicReferenceMapBackend
+
+       # @!visibility private
+       class Table < Concurrent::ThreadSafe::Util::PowerOfTwoTuple
+         def cas_new_node(i, hash, key, value)
+           cas(i, nil, Node.new(hash, key, value))
+         end
+
+         def try_to_cas_in_computed(i, hash, key)
+           succeeded = false
+           new_value = nil
+           new_node = Node.new(locked_hash = hash | LOCKED, key, NULL)
+           if cas(i, nil, new_node)
+             begin
+               if NULL == (new_value = yield(NULL))
+                 was_null = true
+               else
+                 new_node.value = new_value
+               end
+               succeeded = true
+             ensure
+               volatile_set(i, nil) if !succeeded || was_null
+               new_node.unlock_via_hash(locked_hash, hash)
+             end
+           end
+           return succeeded, new_value
+         end
+
+         def try_lock_via_hash(i, node, node_hash)
+           node.try_lock_via_hash(node_hash) do
+             yield if volatile_get(i) == node
+           end
+         end
+
+         def delete_node_at(i, node, predecessor_node)
+           if predecessor_node
+             predecessor_node.next = node.next
+           else
+             volatile_set(i, node.next)
+           end
+         end
+       end
+
+       # Key-value entry. Nodes with a hash field of +MOVED+ are special, and do
+       # not contain user keys or values. Otherwise, keys are never +nil+, and
+       # +NULL+ +value+ fields indicate that a node is in the process of being
+       # deleted or created. For purposes of read-only access, a key may be read
+       # before a value, but can only be used after checking value to be +!= NULL+.
+       #
+       # @!visibility private
+       class Node
+         extend Concurrent::ThreadSafe::Util::Volatile
+         attr_volatile :hash, :value, :next
+
+         include Concurrent::ThreadSafe::Util::CheapLockable
+
+         bit_shift = Concurrent::ThreadSafe::Util::FIXNUM_BIT_SIZE - 2 # need 2 bits for ourselves
+         # Encodings for special uses of Node hash fields. See above for explanation.
+         MOVED     = ('10' << ('0' * bit_shift)).to_i(2) # hash field for forwarding nodes
+         LOCKED    = ('01' << ('0' * bit_shift)).to_i(2) # set/tested only as a bit
+         WAITING   = ('11' << ('0' * bit_shift)).to_i(2) # both bits set/tested together
+         HASH_BITS = ('00' << ('1' * bit_shift)).to_i(2) # usable bits of normal node hash
+
+         SPIN_LOCK_ATTEMPTS = Concurrent::ThreadSafe::Util::CPU_COUNT > 1 ? Concurrent::ThreadSafe::Util::CPU_COUNT * 2 : 0
+
+         attr_reader :key
+
+         def initialize(hash, key, value, next_node = nil)
+           super()
+           @key = key
+           self.lazy_set_hash(hash)
+           self.lazy_set_value(value)
+           self.next = next_node
+         end
+
+         # Spins a while if +LOCKED+ bit set and this node is the first of its bin,
+         # and then sets +WAITING+ bits on hash field and blocks (once) if they are
+         # still set. It is OK for this method to return even if lock is not
+         # available upon exit, which enables these simple single-wait mechanics.
+         #
+         # The corresponding signalling operation is performed within callers: Upon
+         # detecting that +WAITING+ has been set when unlocking lock (via a failed
+         # CAS from non-waiting +LOCKED+ state), unlockers acquire the
+         # +cheap_synchronize+ lock and perform a +cheap_broadcast+.
+         def try_await_lock(table, i)
+           if table && i >= 0 && i < table.size # bounds check, TODO: why are we bounds checking?
+             spins = SPIN_LOCK_ATTEMPTS
+             randomizer = base_randomizer = Concurrent::ThreadSafe::Util::XorShiftRandom.get
+             while equal?(table.volatile_get(i)) && self.class.locked_hash?(my_hash = hash)
+               if spins >= 0
+                 if (randomizer = (randomizer >> 1)).even? # spin at random
+                   if (spins -= 1) == 0
+                     Thread.pass # yield before blocking
+                   else
+                     randomizer = base_randomizer = Concurrent::ThreadSafe::Util::XorShiftRandom.xorshift(base_randomizer) if randomizer.zero?
+                   end
+                 end
+               elsif cas_hash(my_hash, my_hash | WAITING)
+                 force_acquire_lock(table, i)
+                 break
+               end
+             end
+           end
+         end
+
+         def key?(key)
+           @key.eql?(key)
+         end
+
+         def matches?(key, hash)
+           pure_hash == hash && key?(key)
+         end
+
+         def pure_hash
+           hash & HASH_BITS
+         end
+
+         def try_lock_via_hash(node_hash = hash)
+           if cas_hash(node_hash, locked_hash = node_hash | LOCKED)
+             begin
+               yield
+             ensure
+               unlock_via_hash(locked_hash, node_hash)
+             end
+           end
+         end
+
+         def locked?
+           self.class.locked_hash?(hash)
+         end
+
+         def unlock_via_hash(locked_hash, node_hash)
+           unless cas_hash(locked_hash, node_hash)
+             self.hash = node_hash
+             cheap_synchronize { cheap_broadcast }
+           end
+         end
+
+         private
+         def force_acquire_lock(table, i)
+           cheap_synchronize do
+             if equal?(table.volatile_get(i)) && (hash & WAITING) == WAITING
+               cheap_wait
+             else
+               cheap_broadcast # possibly won race vs signaller
+             end
+           end
+         end
+
+         class << self
+           def locked_hash?(hash)
+             (hash & LOCKED) != 0
+           end
+         end
+       end
+
+       # shorthands
+       MOVED     = Node::MOVED
+       LOCKED    = Node::LOCKED
+       WAITING   = Node::WAITING
+       HASH_BITS = Node::HASH_BITS
+
+       NOW_RESIZING     = -1
+       DEFAULT_CAPACITY = 16
+       MAX_CAPACITY     = Concurrent::ThreadSafe::Util::MAX_INT
+
+       # The buffer size for skipped bins during transfers. The
+       # value is arbitrary but should be large enough to avoid
+       # most locking stalls during resizes.
+       TRANSFER_BUFFER_SIZE = 32
+
+       extend Concurrent::ThreadSafe::Util::Volatile
+       attr_volatile :table, # The array of bins. Lazily initialized upon first insertion. Size is always a power of two.
+
+         # Table initialization and resizing control. When negative, the
+         # table is being initialized or resized. Otherwise, when table is
+         # null, holds the initial table size to use upon creation, or 0
+         # for default. After initialization, holds the next element count
+         # value upon which to resize the table.
+         :size_control
+
+       def initialize(options = nil)
+         super()
+         @counter = Concurrent::ThreadSafe::Util::Adder.new
+         initial_capacity = options && options[:initial_capacity] || DEFAULT_CAPACITY
+         self.size_control = (capacity = table_size_for(initial_capacity)) > MAX_CAPACITY ? MAX_CAPACITY : capacity
+       end
+
+       def get_or_default(key, else_value = nil)
+         hash = key_hash(key)
+         current_table = table
+         while current_table
+           node = current_table.volatile_get_by_hash(hash)
+           current_table =
+             while node
+               if (node_hash = node.hash) == MOVED
+                 break node.key
+               elsif (node_hash & HASH_BITS) == hash && node.key?(key) && NULL != (value = node.value)
+                 return value
+               end
+               node = node.next
+             end
+         end
+         else_value
+       end
+
+       def [](key)
+         get_or_default(key)
+       end
+
+       def key?(key)
+         get_or_default(key, NULL) != NULL
+       end
+
+       def []=(key, value)
+         get_and_set(key, value)
+         value
+       end
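+
+       # A quick usage sketch of the public surface above (illustrative, not
+       # part of the original file):
+       #
+       #     backend = Concurrent::Collection::AtomicReferenceMapBackend.new
+       #     backend[:a] = 1     # get_and_set under the hood
+       #     backend[:a]         # => 1
+       #     backend.key?(:b)    # => false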
+
+       def compute_if_absent(key)
+         hash = key_hash(key)
+         current_table = table || initialize_table
+         while true
+           if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
+             succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key) { yield }
+             if succeeded
+               increment_size
+               return new_value
+             end
+           elsif (node_hash = node.hash) == MOVED
+             current_table = node.key
+           elsif NULL != (current_value = find_value_in_node_list(node, key, hash, node_hash & HASH_BITS))
+             return current_value
+           elsif Node.locked_hash?(node_hash)
+             try_await_lock(current_table, i, node)
+           else
+             succeeded, value = attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) { yield }
+             return value if succeeded
+           end
+         end
+       end
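+
+       # For instance, +compute_if_absent+ can serve as an atomic memoizer
+       # (illustrative sketch): the block runs while the bin is held
+       # exclusively, so an absent key computes its value once.
+       #
+       #     backend.compute_if_absent(:answer) { 6 * 7 } # => 42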
+
+       def compute_if_present(key)
+         new_value = nil
+         internal_replace(key) do |old_value|
+           if (new_value = yield(NULL == old_value ? nil : old_value)).nil?
+             NULL
+           else
+             new_value
+           end
+         end
+         new_value
+       end
+
+       def compute(key)
+         internal_compute(key) do |old_value|
+           if (new_value = yield(NULL == old_value ? nil : old_value)).nil?
+             NULL
+           else
+             new_value
+           end
+         end
+       end
+
+       def merge_pair(key, value)
+         internal_compute(key) do |old_value|
+           if NULL == old_value || !(value = yield(old_value)).nil?
+             value
+           else
+             NULL
+           end
+         end
+       end
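+
+       # +merge_pair+ semantics in brief (illustrative): if the key is absent
+       # the given value is stored; if present, the block decides, and a +nil+
+       # block result deletes the pair.
+       #
+       #     backend.merge_pair(:hits, 1) { |old| old + 1 } # insert 1, or increment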
+
+       def replace_pair(key, old_value, new_value)
+         NULL != internal_replace(key, old_value) { new_value }
+       end
+
+       def replace_if_exists(key, new_value)
+         if (result = internal_replace(key) { new_value }) && NULL != result
+           result
+         end
+       end
+
+       def get_and_set(key, value) # internalPut in the original CHMV8
+         hash = key_hash(key)
+         current_table = table || initialize_table
+         while true
+           if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
+             if current_table.cas_new_node(i, hash, key, value)
+               increment_size
+               break
+             end
+           elsif (node_hash = node.hash) == MOVED
+             current_table = node.key
+           elsif Node.locked_hash?(node_hash)
+             try_await_lock(current_table, i, node)
+           else
+             succeeded, old_value = attempt_get_and_set(key, value, hash, current_table, i, node, node_hash)
+             break old_value if succeeded
+           end
+         end
+       end
+
+       def delete(key)
+         replace_if_exists(key, NULL)
+       end
+
+       def delete_pair(key, value)
+         result = internal_replace(key, value) { NULL }
+         if result && NULL != result
+           !!result
+         else
+           false
+         end
+       end
+
+       def each_pair
+         return self unless current_table = table
+         current_table_size = base_size = current_table.size
+         i = base_index = 0
+         while base_index < base_size
+           if node = current_table.volatile_get(i)
+             if node.hash == MOVED
+               current_table = node.key
+               current_table_size = current_table.size
+             else
+               begin
+                 if NULL != (value = node.value) # skip deleted or special nodes
+                   yield node.key, value
+                 end
+               end while node = node.next
+             end
+           end
+
+           if (i_with_base = i + base_size) < current_table_size
+             i = i_with_base # visit upper slots if present
+           else
+             i = base_index += 1
+           end
+         end
+         self
+       end
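+
+       # Iteration sketch (illustrative); as described in the class comment,
+       # the view is weakly consistent under concurrent updates:
+       #
+       #     backend.each_pair { |key, value| puts "#{key} => #{value}" }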
+
+       def size
+         (sum = @counter.sum) < 0 ? 0 : sum # ignore transient negative values
+       end
+
+       def empty?
+         size == 0
+       end
+
+       # Implementation for clear. Steps through each bin, removing all nodes.
+       def clear
+         return self unless current_table = table
+         current_table_size = current_table.size
+         deleted_count = i = 0
+         while i < current_table_size
+           if !(node = current_table.volatile_get(i))
+             i += 1
+           elsif (node_hash = node.hash) == MOVED
+             current_table = node.key
+             current_table_size = current_table.size
+           elsif Node.locked_hash?(node_hash)
+             decrement_size(deleted_count) # opportunistically update count
+             deleted_count = 0
+             node.try_await_lock(current_table, i)
+           else
+             current_table.try_lock_via_hash(i, node, node_hash) do
+               begin
+                 deleted_count += 1 if NULL != node.value # recheck under lock
+                 node.value = nil
+               end while node = node.next
+               current_table.volatile_set(i, nil)
+               i += 1
+             end
+           end
+         end
+         decrement_size(deleted_count)
+         self
+       end
+
+       private
+       # Internal versions of the insertion methods, each a
+       # little more complicated than the last. All have
+       # the same basic structure:
+       # 1. If table uninitialized, create
+       # 2. If bin empty, try to CAS new node
+       # 3. If bin stale, use new table
+       # 4. Lock and validate; if valid, scan and add or update
+       #
+       # The others interweave other checks and/or alternative actions:
+       # * Plain +get_and_set+ checks for and performs resize after insertion.
+       # * compute_if_absent prescans for mapping without lock (and fails to add
+       #   if present), which also makes pre-emptive resize checks worthwhile.
+       #
+       # Someday when details settle down a bit more, it might be worth
+       # some factoring to reduce sprawl.
+       def internal_replace(key, expected_old_value = NULL, &block)
+         hash = key_hash(key)
+         current_table = table
+         while current_table
+           if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
+             break
+           elsif (node_hash = node.hash) == MOVED
+             current_table = node.key
+           elsif (node_hash & HASH_BITS) != hash && !node.next # precheck
+             break # rules out possible existence
+           elsif Node.locked_hash?(node_hash)
+             try_await_lock(current_table, i, node)
+           else
+             succeeded, old_value = attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash, &block)
+             return old_value if succeeded
+           end
+         end
+         NULL
+       end
+
+       def attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash)
+         current_table.try_lock_via_hash(i, node, node_hash) do
+           predecessor_node = nil
+           old_value = NULL
+           begin
+             if node.matches?(key, hash) && NULL != (current_value = node.value)
+               if NULL == expected_old_value || expected_old_value == current_value # NULL == expected_old_value means whatever value
+                 old_value = current_value
+                 if NULL == (node.value = yield(old_value))
+                   current_table.delete_node_at(i, node, predecessor_node)
+                   decrement_size
+                 end
+               end
+               break
+             end
+
+             predecessor_node = node
+           end while node = node.next
+
+           return true, old_value
+         end
+       end
+
+       def find_value_in_node_list(node, key, hash, pure_hash)
+         do_check_for_resize = false
+         while true
+           if pure_hash == hash && node.key?(key) && NULL != (value = node.value)
+             return value
+           elsif node = node.next
+             do_check_for_resize = true # at least 2 nodes -> check for resize
+             pure_hash = node.pure_hash
+           else
+             return NULL
+           end
+         end
+       ensure
+         check_for_resize if do_check_for_resize
+       end
+
+       def internal_compute(key, &block)
+         hash = key_hash(key)
+         current_table = table || initialize_table
+         while true
+           if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
+             succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key, &block)
+             if succeeded
+               if NULL == new_value
+                 break nil
+               else
+                 increment_size
+                 break new_value
+               end
+             end
+           elsif (node_hash = node.hash) == MOVED
+             current_table = node.key
+           elsif Node.locked_hash?(node_hash)
+             try_await_lock(current_table, i, node)
+           else
+             succeeded, new_value = attempt_compute(key, hash, current_table, i, node, node_hash, &block)
+             break new_value if succeeded
+           end
+         end
+       end
+
+       def attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash)
+         added = false
+         current_table.try_lock_via_hash(i, node, node_hash) do
+           while true
+             if node.matches?(key, hash) && NULL != (value = node.value)
+               return true, value
+             end
+             last = node
+             unless node = node.next
+               last.next = Node.new(hash, key, value = yield)
+               added = true
+               increment_size
+               return true, value
+             end
+           end
+         end
+       ensure
+         check_for_resize if added
+       end
+
+       def attempt_compute(key, hash, current_table, i, node, node_hash)
+         added = false
+         current_table.try_lock_via_hash(i, node, node_hash) do
+           predecessor_node = nil
+           while true
+             if node.matches?(key, hash) && NULL != (value = node.value)
+               if NULL == (node.value = value = yield(value))
+                 current_table.delete_node_at(i, node, predecessor_node)
+                 decrement_size
+                 value = nil
+               end
+               return true, value
+             end
+             predecessor_node = node
+             unless node = node.next
+               if NULL == (value = yield(NULL))
+                 value = nil
+               else
+                 predecessor_node.next = Node.new(hash, key, value)
+                 added = true
+                 increment_size
+               end
+               return true, value
+             end
+           end
+         end
+       ensure
+         check_for_resize if added
+       end
+
+       def attempt_get_and_set(key, value, hash, current_table, i, node, node_hash)
+         node_nesting = nil
+         current_table.try_lock_via_hash(i, node, node_hash) do
+           node_nesting = 1
+           old_value = nil
+           found_old_value = false
+           while node
+             if node.matches?(key, hash) && NULL != (old_value = node.value)
+               found_old_value = true
+               node.value = value
+               break
+             end
+             last = node
+             unless node = node.next
+               last.next = Node.new(hash, key, value)
+               break
+             end
+             node_nesting += 1
+           end
+
+           return true, old_value if found_old_value
+           increment_size
+           true
+         end
+       ensure
+         check_for_resize if node_nesting && (node_nesting > 1 || current_table.size <= 64)
+       end
+
+       def initialize_copy(other)
+         super
+         @counter = Concurrent::ThreadSafe::Util::Adder.new
+         self.table = nil
+         self.size_control = (other_table = other.table) ? other_table.size : DEFAULT_CAPACITY
+         self
+       end
+
+       def try_await_lock(current_table, i, node)
+         check_for_resize # try resizing if can't get lock
+         node.try_await_lock(current_table, i)
+       end
+
+       def key_hash(key)
+         key.hash & HASH_BITS
+       end
+
+       # Returns a power of two table size for the given desired capacity.
+       def table_size_for(entry_count)
+         size = 2
+         size <<= 1 while size < entry_count
+         size
+       end
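+
+       # For example (illustrative):
+       #
+       #     table_size_for(5)  # => 8
+       #     table_size_for(16) # => 16
+       #     table_size_for(17) # => 32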
+
+       # Initializes table, using the size recorded in +size_control+.
+       def initialize_table
+         until current_table ||= table
+           if (size_ctrl = size_control) == NOW_RESIZING
+             Thread.pass # lost initialization race; just spin
+           else
+             try_in_resize_lock(current_table, size_ctrl) do
+               initial_size = size_ctrl > 0 ? size_ctrl : DEFAULT_CAPACITY
+               current_table = self.table = Table.new(initial_size)
+               initial_size - (initial_size >> 2) # 75% load factor
+             end
+           end
+         end
+         current_table
+       end
+
+       # If table is too small and not already resizing, creates next table and
+       # transfers bins. Rechecks occupancy after a transfer to see if another
+       # resize is already needed because resizings are lagging additions.
+       def check_for_resize
+         while (current_table = table) && MAX_CAPACITY > (table_size = current_table.size) && NOW_RESIZING != (size_ctrl = size_control) && size_ctrl < @counter.sum
+           try_in_resize_lock(current_table, size_ctrl) do
+             self.table = rebuild(current_table)
+             (table_size << 1) - (table_size >> 1) # 75% load factor
+           end
+         end
+       end
+
+       def try_in_resize_lock(current_table, size_ctrl)
+         if cas_size_control(size_ctrl, NOW_RESIZING)
+           begin
+             if current_table == table # recheck under lock
+               size_ctrl = yield # get new size_control
+             end
+           ensure
+             self.size_control = size_ctrl
+           end
+         end
+       end
+
+       # Moves and/or copies the nodes in each bin to new table. See above for explanation.
+       def rebuild(table)
+         old_table_size = table.size
+         new_table = table.next_in_size_table
+         # puts "#{old_table_size} -> #{new_table.size}"
+         forwarder = Node.new(MOVED, new_table, NULL)
+         rev_forwarder = nil
+         locked_indexes = nil # holds bins to revisit; nil until needed
+         locked_arr_idx = 0
+         bin = old_table_size - 1
+         i = bin
+         while true
+           if !(node = table.volatile_get(i))
+             # no lock needed (or available) if bin >= 0, because we're not popping values from locked_indexes until we've run through the whole table
+             redo unless (bin >= 0 ? table.cas(i, nil, forwarder) : lock_and_clean_up_reverse_forwarders(table, old_table_size, new_table, i, forwarder))
+           elsif Node.locked_hash?(node_hash = node.hash)
+             locked_indexes ||= ::Array.new
+             if bin < 0 && locked_arr_idx > 0
+               locked_arr_idx -= 1
+               i, locked_indexes[locked_arr_idx] = locked_indexes[locked_arr_idx], i # swap with another bin
+               redo
+             end
+             if bin < 0 || locked_indexes.size >= TRANSFER_BUFFER_SIZE
+               node.try_await_lock(table, i) # no other options -- block
+               redo
+             end
+             rev_forwarder ||= Node.new(MOVED, table, NULL)
+             redo unless table.volatile_get(i) == node && node.locked? # recheck before adding to list
+             locked_indexes << i
+             new_table.volatile_set(i, rev_forwarder)
+             new_table.volatile_set(i + old_table_size, rev_forwarder)
+           else
+             redo unless split_old_bin(table, new_table, i, node, node_hash, forwarder)
+           end
+
+           if bin > 0
+             i = (bin -= 1)
+           elsif locked_indexes && !locked_indexes.empty?
+             bin = -1
+             i = locked_indexes.pop
+             locked_arr_idx = locked_indexes.size - 1
+           else
+             return new_table
+           end
+         end
+       end
+
+       def lock_and_clean_up_reverse_forwarders(old_table, old_table_size, new_table, i, forwarder)
+         # transiently use a locked forwarding node
+         locked_forwarder = Node.new(moved_locked_hash = MOVED | LOCKED, new_table, NULL)
+         if old_table.cas(i, nil, locked_forwarder)
+           new_table.volatile_set(i, nil) # kill the potential reverse forwarders
+           new_table.volatile_set(i + old_table_size, nil) # kill the potential reverse forwarders
+           old_table.volatile_set(i, forwarder)
+           locked_forwarder.unlock_via_hash(moved_locked_hash, MOVED)
+           true
+         end
+       end
+
+       # Splits a normal bin with list headed by e into lo and hi parts; installs in given table.
+       def split_old_bin(table, new_table, i, node, node_hash, forwarder)
+         table.try_lock_via_hash(i, node, node_hash) do
+           split_bin(new_table, i, node, node_hash)
+           table.volatile_set(i, forwarder)
+         end
+       end
+
+       def split_bin(new_table, i, node, node_hash)
+         bit = new_table.size >> 1 # bit to split on
+         run_bit = node_hash & bit
+         last_run = nil
+         low = nil
+         high = nil
+         current_node = node
+         # this optimises for the lowest amount of volatile writes and objects created
+         while current_node = current_node.next
+           unless (b = current_node.hash & bit) == run_bit
+             run_bit = b
+             last_run = current_node
+           end
+         end
+         if run_bit == 0
+           low = last_run
+         else
+           high = last_run
+         end
+         current_node = node
+         until current_node == last_run
+           pure_hash = current_node.pure_hash
+           if (pure_hash & bit) == 0
+             low = Node.new(pure_hash, current_node.key, current_node.value, low)
+           else
+             high = Node.new(pure_hash, current_node.key, current_node.value, high)
+           end
+           current_node = current_node.next
+         end
+         new_table.volatile_set(i, low)
+         new_table.volatile_set(i + bit, high)
+       end
+
+       def increment_size
+         @counter.increment
+       end
+
+       def decrement_size(by = 1)
+         @counter.add(-by)
+       end
+     end
+   end
+ end
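
A minimal end-to-end sketch of the backend above (illustrative, not from the
gem's documentation; assumes the file has been loaded, e.g. via
require 'concurrent/collection/map/atomic_reference_map_backend' after
require 'concurrent'):

    backend = Concurrent::Collection::AtomicReferenceMapBackend.new(initial_capacity: 64)
    backend[:visits] = 0
    10.times.map {
      Thread.new { backend.compute(:visits) { |count| count + 1 } } # atomic under the bin lock
    }.each(&:join)
    backend[:visits] # => 10

In practice this class is rarely used directly: Concurrent::Map picks an
engine-appropriate backend and exposes the public map API on top of it.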