concurrent-ruby 0.9.2 → 1.0.0.pre1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +15 -1
  3. data/README.md +67 -68
  4. data/lib/concurrent.rb +14 -1
  5. data/lib/concurrent/array.rb +38 -0
  6. data/lib/concurrent/async.rb +0 -17
  7. data/lib/concurrent/atomic/abstract_thread_local_var.rb +40 -0
  8. data/lib/concurrent/atomic/atomic_boolean.rb +81 -118
  9. data/lib/concurrent/atomic/atomic_fixnum.rb +98 -162
  10. data/lib/concurrent/atomic/atomic_reference.rb +0 -7
  11. data/lib/concurrent/atomic/count_down_latch.rb +62 -103
  12. data/lib/concurrent/atomic/cyclic_barrier.rb +2 -0
  13. data/lib/concurrent/atomic/java_count_down_latch.rb +39 -0
  14. data/lib/concurrent/atomic/java_thread_local_var.rb +50 -0
  15. data/lib/concurrent/atomic/mutex_atomic_boolean.rb +60 -0
  16. data/lib/concurrent/atomic/mutex_atomic_fixnum.rb +91 -0
  17. data/lib/concurrent/atomic/mutex_count_down_latch.rb +43 -0
  18. data/lib/concurrent/atomic/mutex_semaphore.rb +115 -0
  19. data/lib/concurrent/atomic/ruby_thread_local_var.rb +172 -0
  20. data/lib/concurrent/atomic/semaphore.rb +84 -178
  21. data/lib/concurrent/atomic/thread_local_var.rb +63 -294
  22. data/lib/concurrent/atomic_reference/mutex_atomic.rb +14 -8
  23. data/lib/concurrent/atomics.rb +0 -33
  24. data/lib/concurrent/collection/java_non_concurrent_priority_queue.rb +84 -0
  25. data/lib/concurrent/collection/map/atomic_reference_map_backend.rb +921 -0
  26. data/lib/concurrent/collection/map/mri_map_backend.rb +66 -0
  27. data/lib/concurrent/collection/map/non_concurrent_map_backend.rb +142 -0
  28. data/lib/concurrent/collection/map/synchronized_map_backend.rb +86 -0
  29. data/lib/concurrent/collection/non_concurrent_priority_queue.rb +143 -0
  30. data/lib/concurrent/collection/ruby_non_concurrent_priority_queue.rb +150 -0
  31. data/lib/concurrent/concern/logging.rb +1 -1
  32. data/lib/concurrent/concern/obligation.rb +0 -12
  33. data/lib/concurrent/configuration.rb +18 -148
  34. data/lib/concurrent/delay.rb +5 -4
  35. data/lib/concurrent/exchanger.rb +327 -41
  36. data/lib/concurrent/executor/abstract_executor_service.rb +134 -0
  37. data/lib/concurrent/executor/executor.rb +4 -29
  38. data/lib/concurrent/executor/executor_service.rb +23 -359
  39. data/lib/concurrent/executor/immediate_executor.rb +3 -2
  40. data/lib/concurrent/executor/java_executor_service.rb +100 -0
  41. data/lib/concurrent/executor/java_single_thread_executor.rb +3 -2
  42. data/lib/concurrent/executor/java_thread_pool_executor.rb +3 -4
  43. data/lib/concurrent/executor/ruby_executor_service.rb +72 -0
  44. data/lib/concurrent/executor/ruby_single_thread_executor.rb +7 -5
  45. data/lib/concurrent/executor/ruby_thread_pool_executor.rb +3 -11
  46. data/lib/concurrent/executor/safe_task_executor.rb +1 -1
  47. data/lib/concurrent/executor/serial_executor_service.rb +34 -0
  48. data/lib/concurrent/executor/serialized_execution.rb +8 -31
  49. data/lib/concurrent/executor/serialized_execution_delegator.rb +28 -0
  50. data/lib/concurrent/executor/simple_executor_service.rb +1 -10
  51. data/lib/concurrent/executor/timer_set.rb +4 -8
  52. data/lib/concurrent/executors.rb +13 -2
  53. data/lib/concurrent/future.rb +2 -2
  54. data/lib/concurrent/hash.rb +35 -0
  55. data/lib/concurrent/ivar.rb +9 -14
  56. data/lib/concurrent/map.rb +178 -0
  57. data/lib/concurrent/promise.rb +2 -2
  58. data/lib/concurrent/scheduled_task.rb +9 -69
  59. data/lib/concurrent/thread_safe/synchronized_delegator.rb +50 -0
  60. data/lib/concurrent/thread_safe/util.rb +23 -0
  61. data/lib/concurrent/thread_safe/util/adder.rb +71 -0
  62. data/lib/concurrent/thread_safe/util/array_hash_rbx.rb +28 -0
  63. data/lib/concurrent/thread_safe/util/cheap_lockable.rb +115 -0
  64. data/lib/concurrent/thread_safe/util/power_of_two_tuple.rb +37 -0
  65. data/lib/concurrent/thread_safe/util/striped64.rb +236 -0
  66. data/lib/concurrent/thread_safe/util/volatile.rb +73 -0
  67. data/lib/concurrent/thread_safe/util/xor_shift_random.rb +48 -0
  68. data/lib/concurrent/timer_task.rb +3 -3
  69. data/lib/concurrent/tuple.rb +86 -0
  70. data/lib/concurrent/version.rb +2 -2
  71. metadata +37 -10
  72. data/lib/concurrent/atomic/condition.rb +0 -78
  73. data/lib/concurrent/collection/priority_queue.rb +0 -360
  74. data/lib/concurrent/utilities.rb +0 -5
  75. data/lib/concurrent/utility/timeout.rb +0 -39
  76. data/lib/concurrent/utility/timer.rb +0 -26
  77. data/lib/concurrent_ruby.rb +0 -2
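
Beyond the version bump, the file list shows the thread_safe gem's data structures being folded into concurrent-ruby: map.rb, array.rb, hash.rb, tuple.rb and the thread_safe/util helpers are all new. A minimal, hedged sketch of what that makes available from a single require; the class names come from the file list above, while the exact method names (compute, compare_and_set, and so on) are assumptions about the public API:

    require 'concurrent'

    # Newly bundled in 1.0.0.pre1 (formerly the thread_safe gem):
    map = Concurrent::Map.new            # data/lib/concurrent/map.rb
    map[:hits] = 0
    map.compute(:hits) { |v| v + 1 }     # atomic read-modify-write on one key

    ary  = Concurrent::Array.new         # thread-safe Array variant
    hash = Concurrent::Hash.new          # thread-safe Hash variant
    ary << :job
    hash[:job] = :queued

    tuple = Concurrent::Tuple.new(2)     # fixed-size container of CAS-able slots
    tuple.set(0, :ready)
    tuple.compare_and_set(1, nil, :done)
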
@@ -1,4 +1,4 @@
1
- require 'thread'
1
+ require 'concurrent/synchronization'
2
2
  require 'concurrent/atomic_reference/direct_update'
3
3
  require 'concurrent/atomic_reference/numeric_cas_wrapper'
4
4
 
@@ -8,31 +8,31 @@ module Concurrent
8
8
  #
9
9
  # @!visibility private
10
10
  # @!macro internal_implementation_note
11
- class MutexAtomicReference
11
+ class MutexAtomicReference < Synchronization::Object
12
12
  include Concurrent::AtomicDirectUpdate
13
13
  include Concurrent::AtomicNumericCompareAndSetWrapper
14
14
 
15
15
  # @!macro atomic_reference_method_initialize
16
16
  def initialize(value = nil)
17
- @mutex = Mutex.new
18
- @value = value
17
+ super()
18
+ synchronize { ns_initialize(value) }
19
19
  end
20
20
 
21
21
  # @!macro atomic_reference_method_get
22
22
  def get
23
- @mutex.synchronize { @value }
23
+ synchronize { @value }
24
24
  end
25
25
  alias_method :value, :get
26
26
 
27
27
  # @!macro atomic_reference_method_set
28
28
  def set(new_value)
29
- @mutex.synchronize { @value = new_value }
29
+ synchronize { @value = new_value }
30
30
  end
31
31
  alias_method :value=, :set
32
32
 
33
33
  # @!macro atomic_reference_method_get_and_set
34
34
  def get_and_set(new_value)
35
- @mutex.synchronize do
35
+ synchronize do
36
36
  old_value = @value
37
37
  @value = new_value
38
38
  old_value
@@ -42,7 +42,7 @@ module Concurrent
42
42
 
43
43
  # @!macro atomic_reference_method_compare_and_set
44
44
  def _compare_and_set(old_value, new_value)
45
- @mutex.synchronize do
45
+ synchronize do
46
46
  if @value.equal? old_value
47
47
  @value = new_value
48
48
  true
@@ -51,5 +51,11 @@ module Concurrent
51
51
  end
52
52
  end
53
53
  end
54
+
55
+ protected
56
+
57
+ def ns_initialize(value)
58
+ @value = value
59
+ end
54
60
  end
55
61
  end
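
The hunk above shows the new synchronization pattern used throughout 1.0.0: the class inherits Concurrent::Synchronization::Object, the constructor calls super() and wraps setup in synchronize, and the actual assignments live in a protected ns_initialize ("ns" for non-synchronized). A toy sketch of the same pattern, not taken from the gem, assuming Synchronization::Object exposes synchronize exactly as used in the diff:

    require 'concurrent'

    # Toy class (not gem code) following the pattern shown in the diff above.
    class Counter < Concurrent::Synchronization::Object
      def initialize(start = 0)
        super()
        synchronize { ns_initialize(start) }   # publish initial state under the lock
      end

      def increment
        synchronize { @count += 1 }
      end

      def value
        synchronize { @count }
      end

      protected

      # ns_* methods assume the caller already holds the lock.
      def ns_initialize(start)
        @count = start
      end
    end

    c = Counter.new
    c.increment
    c.value  # => 1
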
@@ -2,38 +2,6 @@
2
2
  #
3
3
  # An object reference that may be updated atomically.
4
4
  #
5
- # Testing with ruby 2.1.2
6
- #
7
- # *** Sequential updates ***
8
- # user system total real
9
- # no lock 0.000000 0.000000 0.000000 ( 0.005502)
10
- # mutex 0.030000 0.000000 0.030000 ( 0.025158)
11
- # MutexAtomicReference 0.100000 0.000000 0.100000 ( 0.103096)
12
- # CAtomicReference 0.040000 0.000000 0.040000 ( 0.034012)
13
- #
14
- # *** Parallel updates ***
15
- # user system total real
16
- # no lock 0.010000 0.000000 0.010000 ( 0.009387)
17
- # mutex 0.030000 0.010000 0.040000 ( 0.032545)
18
- # MutexAtomicReference 0.830000 2.280000 3.110000 ( 2.146622)
19
- # CAtomicReference 0.040000 0.000000 0.040000 ( 0.038332)
20
- #
21
- # Testing with jruby 1.9.3
22
- #
23
- # *** Sequential updates ***
24
- # user system total real
25
- # no lock 0.170000 0.000000 0.170000 ( 0.051000)
26
- # mutex 0.370000 0.010000 0.380000 ( 0.121000)
27
- # MutexAtomicReference 1.530000 0.020000 1.550000 ( 0.471000)
28
- # JavaAtomicReference 0.370000 0.010000 0.380000 ( 0.112000)
29
- #
30
- # *** Parallel updates ***
31
- # user system total real
32
- # no lock 0.390000 0.000000 0.390000 ( 0.105000)
33
- # mutex 0.480000 0.040000 0.520000 ( 0.145000)
34
- # MutexAtomicReference 1.600000 0.180000 1.780000 ( 0.511000)
35
- # JavaAtomicReference 0.460000 0.010000 0.470000 ( 0.131000)
36
- #
37
5
  # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicReference.html
38
6
  # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/package-summary.html
39
7
  #
@@ -73,7 +41,6 @@
73
41
  require 'concurrent/atomic/atomic_reference'
74
42
  require 'concurrent/atomic/atomic_boolean'
75
43
  require 'concurrent/atomic/atomic_fixnum'
76
- require 'concurrent/atomic/condition'
77
44
  require 'concurrent/atomic/cyclic_barrier'
78
45
  require 'concurrent/atomic/count_down_latch'
79
46
  require 'concurrent/atomic/event'
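
The hunks above strip the old benchmark tables from the AtomicReference docs and drop the concurrent/atomic/condition require; the reference API itself is unchanged. A short usage sketch of the methods visible in the MutexAtomicReference diff (get/set/get_and_set/compare_and_set plus update from AtomicDirectUpdate), through the public class:

    require 'concurrent'

    ref = Concurrent::AtomicReference.new([])  # selects a platform-specific implementation

    ref.get                              # => []
    ref.set([1])
    ref.get_and_set([2])                 # => [1], reference now holds [2]
    ref.compare_and_set([2], [3])        # => false: compares identity, not equality
    old = ref.value
    ref.compare_and_set(old, old + [3])  # => true
    ref.update { |list| list + [4] }     # retrying CAS loop from AtomicDirectUpdate
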
@@ -0,0 +1,84 @@
1
+ if Concurrent.on_jruby?
2
+
3
+ module Concurrent
4
+ module Collection
5
+
6
+
7
+ # @!macro priority_queue
8
+ #
9
+ # @!visibility private
10
+ # @!macro internal_implementation_note
11
+ class JavaNonConcurrentPriorityQueue
12
+
13
+ # @!macro priority_queue_method_initialize
14
+ def initialize(opts = {})
15
+ order = opts.fetch(:order, :max)
16
+ if [:min, :low].include?(order)
17
+ @queue = java.util.PriorityQueue.new(11) # 11 is the default initial capacity
18
+ else
19
+ @queue = java.util.PriorityQueue.new(11, java.util.Collections.reverseOrder())
20
+ end
21
+ end
22
+
23
+ # @!macro priority_queue_method_clear
24
+ def clear
25
+ @queue.clear
26
+ true
27
+ end
28
+
29
+ # @!macro priority_queue_method_delete
30
+ def delete(item)
31
+ found = false
32
+ while @queue.remove(item) do
33
+ found = true
34
+ end
35
+ found
36
+ end
37
+
38
+ # @!macro priority_queue_method_empty
39
+ def empty?
40
+ @queue.size == 0
41
+ end
42
+
43
+ # @!macro priority_queue_method_include
44
+ def include?(item)
45
+ @queue.contains(item)
46
+ end
47
+ alias_method :has_priority?, :include?
48
+
49
+ # @!macro priority_queue_method_length
50
+ def length
51
+ @queue.size
52
+ end
53
+ alias_method :size, :length
54
+
55
+ # @!macro priority_queue_method_peek
56
+ def peek
57
+ @queue.peek
58
+ end
59
+
60
+ # @!macro priority_queue_method_pop
61
+ def pop
62
+ @queue.poll
63
+ end
64
+ alias_method :deq, :pop
65
+ alias_method :shift, :pop
66
+
67
+ # @!macro priority_queue_method_push
68
+ def push(item)
69
+ raise ArgumentError.new('cannot enqueue nil') if item.nil?
70
+ @queue.add(item)
71
+ end
72
+ alias_method :<<, :push
73
+ alias_method :enq, :push
74
+
75
+ # @!macro priority_queue_method_from_list
76
+ def self.from_list(list, opts = {})
77
+ queue = new(opts)
78
+ list.each{|item| queue << item }
79
+ queue
80
+ end
81
+ end
82
+ end
83
+ end
84
+ end
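
This JRuby-only backend wraps java.util.PriorityQueue; the file list also adds a pure-Ruby RubyNonConcurrentPriorityQueue and a NonConcurrentPriorityQueue that is assumed to pick between them per runtime. A hedged sketch of the interface shown above (the class is marked @!visibility private, so this only illustrates the semantics):

    require 'concurrent'

    q = Concurrent::Collection::NonConcurrentPriorityQueue.new(order: :max)
    q << 2                      # push / << / enq are aliases
    q << 10
    q << 5
    q.peek                      # => 10 (max-ordered by default)
    q.pop                       # => 10; pop / deq / shift are aliases
    q.length                    # => 2
    q.include?(5)               # => true
    q.delete(5)                 # => true, removes every matching item
    q.clear                     # => true

    min_q = Concurrent::Collection::NonConcurrentPriorityQueue.from_list([3, 1, 2], order: :min)
    min_q.pop                   # => 1
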
@@ -0,0 +1,921 @@
1
+ require 'concurrent/thread_safe/util'
2
+
3
+ module Concurrent
4
+
5
+ # @!visibility private
6
+ module Collection
7
+
8
+ # A Ruby port of the Doug Lea's jsr166e.ConcurrentHashMapV8 class version 1.59
9
+ # available in public domain.
10
+ #
11
+ # Original source code available here:
12
+ # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ConcurrentHashMapV8.java?revision=1.59
13
+ #
14
+ # The Ruby port skips out the +TreeBin+ (red-black trees for use in bins whose
15
+ # size exceeds a threshold).
16
+ #
17
+ # A hash table supporting full concurrency of retrievals and high expected
18
+ # concurrency for updates. However, even though all operations are
19
+ # thread-safe, retrieval operations do _not_ entail locking, and there is
20
+ # _not_ any support for locking the entire table in a way that prevents all
21
+ # access.
22
+ #
23
+ # Retrieval operations generally do not block, so may overlap with update
24
+ # operations. Retrievals reflect the results of the most recently _completed_
25
+ # update operations holding upon their onset. (More formally, an update
26
+ # operation for a given key bears a _happens-before_ relation with any (non
27
+ # +nil+) retrieval for that key reporting the updated value.) For aggregate
28
+ # operations such as +clear()+, concurrent retrievals may reflect insertion or
29
+ # removal of only some entries. Similarly, the +each_pair+ iterator yields
30
+ # elements reflecting the state of the hash table at some point at or since
31
+ # the start of the +each_pair+. Bear in mind that the results of aggregate
32
+ # status methods including +size()+ and +empty?+ are typically useful only
33
+ # when a map is not undergoing concurrent updates in other threads. Otherwise
34
+ # the results of these methods reflect transient states that may be adequate
35
+ # for monitoring or estimation purposes, but not for program control.
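
The caveat above, that aggregate methods such as size and empty? are only estimates while the map is being mutated, is easy to see through the public wrapper; a small hedged illustration:

    require 'concurrent'

    map = Concurrent::Map.new
    writers = 8.times.map do |w|
      Thread.new { 1_000.times { |i| map["#{w}-#{i}"] = true } }
    end

    # While writers are running, size is only an estimate: adequate for
    # monitoring, not for control flow.
    sleep 0.01
    puts "in flight: ~#{map.size}"

    writers.each(&:join)
    map.size  # => 8000 once all updates have completed
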
36
+ #
37
+ # The table is dynamically expanded when there are too many collisions (i.e.,
38
+ # keys that have distinct hash codes but fall into the same slot modulo the
39
+ # table size), with the expected average effect of maintaining roughly two
40
+ # bins per mapping (corresponding to a 0.75 load factor threshold for
41
+ # resizing). There may be much variance around this average as mappings are
42
+ # added and removed, but overall, this maintains a commonly accepted
43
+ # time/space tradeoff for hash tables. However, resizing this or any other
44
+ # kind of hash table may be a relatively slow operation. When possible, it is
45
+ # a good idea to provide a size estimate as an optional :initial_capacity
46
+ # initializer argument. An additional optional :load_factor constructor
47
+ # argument provides a further means of customizing initial table capacity by
48
+ # specifying the table density to be used in calculating the amount of space
49
+ # to allocate for the given number of elements. Note that using many keys with
50
+ # exactly the same +hash+ is a sure way to slow down performance of any hash
51
+ # table.
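
The :initial_capacity and :load_factor options described above are constructor options of this backend; the public Concurrent::Map added in this release is assumed to pass them straight through. A short hedged sketch:

    require 'concurrent'

    # Pre-sizing avoids repeated resizes when the final size is roughly known.
    cache = Concurrent::Map.new(initial_capacity: 10_000, load_factor: 0.75)

    10_000.times { |i| cache["key-#{i}"] = i }
    cache.size   # => 10_000; only an estimate while other threads are mutating
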
52
+ #
53
+ # ## Design overview
54
+ #
55
+ # The primary design goal of this hash table is to maintain concurrent
56
+ # readability (typically method +[]+, but also iteration and related methods)
57
+ # while minimizing update contention. Secondary goals are to keep space
58
+ # consumption about the same or better than plain +Hash+, and to support high
59
+ # initial insertion rates on an empty table by many threads.
60
+ #
61
+ # Each key-value mapping is held in a +Node+. The validation-based approach
62
+ # explained below leads to a lot of code sprawl because retry-control
63
+ # precludes factoring into smaller methods.
64
+ #
65
+ # The table is lazily initialized to a power-of-two size upon the first
66
+ # insertion. Each bin in the table normally contains a list of +Node+s (most
67
+ # often, the list has only zero or one +Node+). Table accesses require
68
+ # volatile/atomic reads, writes, and CASes. The lists of nodes within bins are
69
+ # always accurately traversable under volatile reads, so long as lookups check
70
+ # hash code and non-nullness of value before checking key equality.
71
+ #
72
+ # We use the top two bits of +Node+ hash fields for control purposes -- they
73
+ # are available anyway because of addressing constraints. As explained further
74
+ # below, these top bits are used as follows:
75
+ #
76
+ # - 00 - Normal
77
+ # - 01 - Locked
78
+ # - 11 - Locked and may have a thread waiting for lock
79
+ # - 10 - +Node+ is a forwarding node
80
+ #
81
+ # The lower 28 bits of each +Node+'s hash field contain the key's hash code,
82
+ # except for forwarding nodes, for which the lower bits are zero (and so
83
+ # always have hash field == +MOVED+).
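
The control-bit constants described here are built further down this file from string-concatenated binary literals. A hedged worked example of the resulting values, assuming a 64-bit MRI where Concurrent::ThreadSafe::Util::FIXNUM_BIT_SIZE is 62 and therefore bit_shift is 60:

    # Assumed layout on 64-bit MRI (FIXNUM_BIT_SIZE = 62, bit_shift = 60):
    bit_shift = 60
    moved     = ('10' + '0' * bit_shift).to_i(2)  # forwarding node
    locked    = ('01' + '0' * bit_shift).to_i(2)  # bin is locked
    waiting   = ('11' + '0' * bit_shift).to_i(2)  # locked with a waiting thread
    hash_bits = ('00' + '1' * bit_shift).to_i(2)  # usable hash bits (bit_shift of them)

    key_hash    = 0xDEADBEEF & hash_bits          # what key_hash() would store
    locked_hash = key_hash | locked               # what try_lock_via_hash CASes to
    (locked_hash & hash_bits) == key_hash         # => true, payload bits preserved
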
84
+ #
85
+ # Insertion (via +[]=+ or its variants) of the first node in an empty bin is
86
+ # performed by just CASing it to the bin. This is by far the most common case
87
+ # for put operations under most key/hash distributions. Other update
88
+ # operations (insert, delete, and replace) require locks. We do not want to
89
+ # waste the space required to associate a distinct lock object with each bin,
90
+ # so instead use the first node of a bin list itself as a lock. Blocking
91
+ # support for these locks relies on +Concurrent::ThreadSafe::Util::CheapLockable+. However, we also need a
92
+ # +try_lock+ construction, so we overlay these by using bits of the +Node+
93
+ # hash field for lock control (see above), and so normally use builtin
94
+ # monitors only for blocking and signalling using
95
+ # +cheap_wait+/+cheap_broadcast+ constructions. See +Node#try_await_lock+.
96
+ #
97
+ # Using the first node of a list as a lock does not by itself suffice though:
98
+ # When a node is locked, any update must first validate that it is still the
99
+ # first node after locking it, and retry if not. Because new nodes are always
100
+ # appended to lists, once a node is first in a bin, it remains first until
101
+ # deleted or the bin becomes invalidated (upon resizing). However, operations
102
+ # that only conditionally update may inspect nodes until the point of update.
103
+ # This is a converse of sorts to the lazy locking technique described by
104
+ # Herlihy & Shavit.
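
The locking discipline described above (lock the bin's first node, then re-check that it is still the first node before mutating, and retry otherwise) can be sketched independently of the hash-bit machinery. The following is schematic only, using a plain Mutex per node instead of CAS on hash bits, and is not gem code:

    Node = Struct.new(:key, :value, :next_node, :lock) do
      def initialize(key, value, next_node = nil)
        super(key, value, next_node, Mutex.new)
      end
    end

    def insert(table, index, key, value)
      loop do
        first = table[index]
        if first.nil?
          table[index] = Node.new(key, value)   # a real implementation would CAS here
          return
        end
        first.lock.synchronize do
          # Validate: is +first+ still the head of the bin? If another thread
          # replaced it between the read and the lock, fall through and retry.
          next unless table[index].equal?(first)
          node = first
          node = node.next_node while node.next_node && node.key != key
          if node.key == key
            node.value = value                      # update in place
          else
            node.next_node = Node.new(key, value)   # append at the tail
          end
          return
        end
      end
    end

    table = Array.new(8)
    insert(table, 3, :a, 1)
    insert(table, 3, :b, 2)
    insert(table, 3, :a, 42)
    table[3].value  # => 42
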
105
+ #
106
+ # The main disadvantage of per-bin locks is that other update operations on
107
+ # other nodes in a bin list protected by the same lock can stall, for example
108
+ # when user +eql?+ or mapping functions take a long time. However,
109
+ # statistically, under random hash codes, this is not a common problem.
110
+ # Ideally, the frequency of nodes in bins follows a Poisson distribution
111
+ # (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of
112
+ # about 0.5 on average, given the resizing threshold of 0.75, although with a
113
+ # large variance because of resizing granularity. Ignoring variance, the
114
+ # expected occurrences of list size k are (exp(-0.5) * pow(0.5, k) /
115
+ # factorial(k)). The first values are:
116
+ #
117
+ # - 0: 0.60653066
118
+ # - 1: 0.30326533
119
+ # - 2: 0.07581633
120
+ # - 3: 0.01263606
121
+ # - 4: 0.00157952
122
+ # - 5: 0.00015795
123
+ # - 6: 0.00001316
124
+ # - 7: 0.00000094
125
+ # - 8: 0.00000006
126
+ # - more: less than 1 in ten million
127
+ #
128
+ # Lock contention probability for two threads accessing distinct elements is
129
+ # roughly 1 / (8 * #elements) under random hashes.
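
The probabilities listed above follow directly from the quoted Poisson formula; a quick Ruby check reproduces the table:

    # Reproduces the bin-occupancy table above: P(k) = exp(-0.5) * 0.5**k / k!
    factorial = ->(k) { (1..k).reduce(1, :*) }
    (0..8).each do |k|
      p_k = Math.exp(-0.5) * 0.5**k / factorial.call(k)
      printf("%d: %.8f\n", k, p_k)
    end
    # 0: 0.60653066, 1: 0.30326533, 2: 0.07581633, ...
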
130
+ #
131
+ # The table is resized when occupancy exceeds a percentage threshold
132
+ # (nominally, 0.75, but see below). Only a single thread performs the resize
133
+ # (using field +size_control+, to arrange exclusion), but the table otherwise
134
+ # remains usable for reads and updates. Resizing proceeds by transferring
135
+ # bins, one by one, from the table to the next table. Because we are using
136
+ # power-of-two expansion, the elements from each bin must either stay at same
137
+ # index, or move with a power of two offset. We eliminate unnecessary node
138
+ # creation by catching cases where old nodes can be reused because their next
139
+ # fields won't change. On average, only about one-sixth of them need cloning
140
+ # when a table doubles. The nodes they replace will be garbage collectable as
141
+ # soon as they are no longer referenced by any reader thread that may be in
142
+ # the midst of concurrently traversing table. Upon transfer, the old table bin
143
+ # contains only a special forwarding node (with hash field +MOVED+) that
144
+ # contains the next table as its key. On encountering a forwarding node,
145
+ # access and update operations restart, using the new table.
146
+ #
147
+ # Each bin transfer requires its bin lock. However, unlike other cases, a
148
+ # transfer can skip a bin if it fails to acquire its lock, and revisit it
149
+ # later. Method +rebuild+ maintains a buffer of TRANSFER_BUFFER_SIZE bins that
150
+ # have been skipped because of failure to acquire a lock, and blocks only if
151
+ # none are available (i.e., only very rarely). The transfer operation must
152
+ # also ensure that all accessible bins in both the old and new table are
153
+ # usable by any traversal. When there are no lock acquisition failures, this
154
+ # is arranged simply by proceeding from the last bin (+table.size - 1+) up
155
+ # towards the first. Upon seeing a forwarding node, traversals arrange to move
156
+ # to the new table without revisiting nodes. However, when any node is skipped
157
+ # during a transfer, all earlier table bins may have become visible, so are
158
+ # initialized with a reverse-forwarding node back to the old table until the
159
+ # new ones are established. (This sometimes requires transiently locking a
160
+ # forwarding node, which is possible under the above encoding.) These more
161
+ # expensive mechanics trigger only when necessary.
162
+ #
163
+ # The traversal scheme also applies to partial traversals of
164
+ # ranges of bins (via an alternate Traverser constructor)
165
+ # to support partitioned aggregate operations. Also, read-only
166
+ # operations give up if ever forwarded to a null table, which
167
+ # provides support for shutdown-style clearing, which is also not
168
+ # currently implemented.
169
+ #
170
+ # Lazy table initialization minimizes footprint until first use.
171
+ #
172
+ # The element count is maintained using a +Concurrent::ThreadSafe::Util::Adder+,
173
+ # which avoids contention on updates but can encounter cache thrashing
174
+ # if read too frequently during concurrent access. To avoid reading so
175
+ # often, resizing is attempted either when a bin lock is
176
+ # contended, or upon adding to a bin already holding two or more
177
+ # nodes (checked before adding in the +x_if_absent+ methods, after
178
+ # adding in others). Under uniform hash distributions, the
179
+ # probability of this occurring at threshold is around 13%,
180
+ # meaning that only about 1 in 8 puts check threshold (and after
181
+ # resizing, many fewer do so). But this approximation has high
182
+ # variance for small table sizes, so we check on any collision
183
+ # for sizes <= 64. The bulk putAll operation further reduces
184
+ # contention by only committing count updates upon these size
185
+ # checks.
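
Element counts are kept in the Concurrent::ThreadSafe::Util::Adder added by this release (thread_safe/util/adder.rb in the file list). The backend only ever calls increment, add and sum on it; a hedged sketch of exactly that usage:

    require 'concurrent'

    # Striped counter used by size(); only the calls this file makes are shown.
    counter = Concurrent::ThreadSafe::Util::Adder.new

    threads = 4.times.map do
      Thread.new { 1_000.times { counter.increment } }  # contended increments spread across cells
    end
    threads.each(&:join)

    counter.add(-5)   # what decrement_size(5) does
    counter.sum       # => 3995; may be transiently stale under concurrent updates
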
186
+ #
187
+ # @!visibility private
188
+ class AtomicReferenceMapBackend
189
+
190
+ # @!visibility private
191
+ class Table < Concurrent::ThreadSafe::Util::PowerOfTwoTuple
192
+ def cas_new_node(i, hash, key, value)
193
+ cas(i, nil, Node.new(hash, key, value))
194
+ end
195
+
196
+ def try_to_cas_in_computed(i, hash, key)
197
+ succeeded = false
198
+ new_value = nil
199
+ new_node = Node.new(locked_hash = hash | LOCKED, key, NULL)
200
+ if cas(i, nil, new_node)
201
+ begin
202
+ if NULL == (new_value = yield(NULL))
203
+ was_null = true
204
+ else
205
+ new_node.value = new_value
206
+ end
207
+ succeeded = true
208
+ ensure
209
+ volatile_set(i, nil) if !succeeded || was_null
210
+ new_node.unlock_via_hash(locked_hash, hash)
211
+ end
212
+ end
213
+ return succeeded, new_value
214
+ end
215
+
216
+ def try_lock_via_hash(i, node, node_hash)
217
+ node.try_lock_via_hash(node_hash) do
218
+ yield if volatile_get(i) == node
219
+ end
220
+ end
221
+
222
+ def delete_node_at(i, node, predecessor_node)
223
+ if predecessor_node
224
+ predecessor_node.next = node.next
225
+ else
226
+ volatile_set(i, node.next)
227
+ end
228
+ end
229
+ end
230
+
231
+ # Key-value entry. Nodes with a hash field of +MOVED+ are special, and do
232
+ # not contain user keys or values. Otherwise, keys are never +nil+, and
233
+ # +NULL+ +value+ fields indicate that a node is in the process of being
234
+ # deleted or created. For purposes of read-only access, a key may be read
235
+ # before a value, but can only be used after checking value to be +!= NULL+.
236
+ #
237
+ # @!visibility private
238
+ class Node
239
+ extend Concurrent::ThreadSafe::Util::Volatile
240
+ attr_volatile :hash, :value, :next
241
+
242
+ include Concurrent::ThreadSafe::Util::CheapLockable
243
+
244
+ bit_shift = Concurrent::ThreadSafe::Util::FIXNUM_BIT_SIZE - 2 # need 2 bits for ourselves
245
+ # Encodings for special uses of Node hash fields. See above for explanation.
246
+ MOVED = ('10' << ('0' * bit_shift)).to_i(2) # hash field for forwarding nodes
247
+ LOCKED = ('01' << ('0' * bit_shift)).to_i(2) # set/tested only as a bit
248
+ WAITING = ('11' << ('0' * bit_shift)).to_i(2) # both bits set/tested together
249
+ HASH_BITS = ('00' << ('1' * bit_shift)).to_i(2) # usable bits of normal node hash
250
+
251
+ SPIN_LOCK_ATTEMPTS = Concurrent::ThreadSafe::Util::CPU_COUNT > 1 ? Concurrent::ThreadSafe::Util::CPU_COUNT * 2 : 0
252
+
253
+ attr_reader :key
254
+
255
+ def initialize(hash, key, value, next_node = nil)
256
+ super()
257
+ @key = key
258
+ self.lazy_set_hash(hash)
259
+ self.lazy_set_value(value)
260
+ self.next = next_node
261
+ end
262
+
263
+ # Spins a while if +LOCKED+ bit set and this node is the first of its bin,
264
+ # and then sets +WAITING+ bits on hash field and blocks (once) if they are
265
+ # still set. It is OK for this method to return even if lock is not
266
+ # available upon exit, which enables these simple single-wait mechanics.
267
+ #
268
+ # The corresponding signalling operation is performed within callers: Upon
269
+ # detecting that +WAITING+ has been set when unlocking lock (via a failed
270
+ # CAS from non-waiting +LOCKED+ state), unlockers acquire the
271
+ # +cheap_synchronize+ lock and perform a +cheap_broadcast+.
272
+ def try_await_lock(table, i)
273
+ if table && i >= 0 && i < table.size # bounds check, TODO: why are we bounds checking?
274
+ spins = SPIN_LOCK_ATTEMPTS
275
+ randomizer = base_randomizer = Concurrent::ThreadSafe::Util::XorShiftRandom.get
276
+ while equal?(table.volatile_get(i)) && self.class.locked_hash?(my_hash = hash)
277
+ if spins >= 0
278
+ if (randomizer = (randomizer >> 1)).even? # spin at random
279
+ if (spins -= 1) == 0
280
+ Thread.pass # yield before blocking
281
+ else
282
+ randomizer = base_randomizer = Concurrent::ThreadSafe::Util::XorShiftRandom.xorshift(base_randomizer) if randomizer.zero?
283
+ end
284
+ end
285
+ elsif cas_hash(my_hash, my_hash | WAITING)
286
+ force_aquire_lock(table, i)
287
+ break
288
+ end
289
+ end
290
+ end
291
+ end
292
+
293
+ def key?(key)
294
+ @key.eql?(key)
295
+ end
296
+
297
+ def matches?(key, hash)
298
+ pure_hash == hash && key?(key)
299
+ end
300
+
301
+ def pure_hash
302
+ hash & HASH_BITS
303
+ end
304
+
305
+ def try_lock_via_hash(node_hash = hash)
306
+ if cas_hash(node_hash, locked_hash = node_hash | LOCKED)
307
+ begin
308
+ yield
309
+ ensure
310
+ unlock_via_hash(locked_hash, node_hash)
311
+ end
312
+ end
313
+ end
314
+
315
+ def locked?
316
+ self.class.locked_hash?(hash)
317
+ end
318
+
319
+ def unlock_via_hash(locked_hash, node_hash)
320
+ unless cas_hash(locked_hash, node_hash)
321
+ self.hash = node_hash
322
+ cheap_synchronize { cheap_broadcast }
323
+ end
324
+ end
325
+
326
+ private
327
+ def force_aquire_lock(table, i)
328
+ cheap_synchronize do
329
+ if equal?(table.volatile_get(i)) && (hash & WAITING) == WAITING
330
+ cheap_wait
331
+ else
332
+ cheap_broadcast # possibly won race vs signaller
333
+ end
334
+ end
335
+ end
336
+
337
+ class << self
338
+ def locked_hash?(hash)
339
+ (hash & LOCKED) != 0
340
+ end
341
+ end
342
+ end
343
+
344
+ # shorthands
345
+ MOVED = Node::MOVED
346
+ LOCKED = Node::LOCKED
347
+ WAITING = Node::WAITING
348
+ HASH_BITS = Node::HASH_BITS
349
+
350
+ NOW_RESIZING = -1
351
+ DEFAULT_CAPACITY = 16
352
+ MAX_CAPACITY = Concurrent::ThreadSafe::Util::MAX_INT
353
+
354
+ # The buffer size for skipped bins during transfers. The
355
+ # value is arbitrary but should be large enough to avoid
356
+ # most locking stalls during resizes.
357
+ TRANSFER_BUFFER_SIZE = 32
358
+
359
+ extend Concurrent::ThreadSafe::Util::Volatile
360
+ attr_volatile :table, # The array of bins. Lazily initialized upon first insertion. Size is always a power of two.
361
+
362
+ # Table initialization and resizing control. When negative, the
363
+ # table is being initialized or resized. Otherwise, when table is
364
+ # null, holds the initial table size to use upon creation, or 0
365
+ # for default. After initialization, holds the next element count
366
+ # value upon which to resize the table.
367
+ :size_control
368
+
369
+ def initialize(options = nil)
370
+ super()
371
+ @counter = Concurrent::ThreadSafe::Util::Adder.new
372
+ initial_capacity = options && options[:initial_capacity] || DEFAULT_CAPACITY
373
+ self.size_control = (capacity = table_size_for(initial_capacity)) > MAX_CAPACITY ? MAX_CAPACITY : capacity
374
+ end
375
+
376
+ def get_or_default(key, else_value = nil)
377
+ hash = key_hash(key)
378
+ current_table = table
379
+ while current_table
380
+ node = current_table.volatile_get_by_hash(hash)
381
+ current_table =
382
+ while node
383
+ if (node_hash = node.hash) == MOVED
384
+ break node.key
385
+ elsif (node_hash & HASH_BITS) == hash && node.key?(key) && NULL != (value = node.value)
386
+ return value
387
+ end
388
+ node = node.next
389
+ end
390
+ end
391
+ else_value
392
+ end
393
+
394
+ def [](key)
395
+ get_or_default(key)
396
+ end
397
+
398
+ def key?(key)
399
+ get_or_default(key, NULL) != NULL
400
+ end
401
+
402
+ def []=(key, value)
403
+ get_and_set(key, value)
404
+ value
405
+ end
406
+
407
+ def compute_if_absent(key)
408
+ hash = key_hash(key)
409
+ current_table = table || initialize_table
410
+ while true
411
+ if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
412
+ succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key) { yield }
413
+ if succeeded
414
+ increment_size
415
+ return new_value
416
+ end
417
+ elsif (node_hash = node.hash) == MOVED
418
+ current_table = node.key
419
+ elsif NULL != (current_value = find_value_in_node_list(node, key, hash, node_hash & HASH_BITS))
420
+ return current_value
421
+ elsif Node.locked_hash?(node_hash)
422
+ try_await_lock(current_table, i, node)
423
+ else
424
+ succeeded, value = attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) { yield }
425
+ return value if succeeded
426
+ end
427
+ end
428
+ end
429
+
430
+ def compute_if_present(key)
431
+ new_value = nil
432
+ internal_replace(key) do |old_value|
433
+ if (new_value = yield(NULL == old_value ? nil : old_value)).nil?
434
+ NULL
435
+ else
436
+ new_value
437
+ end
438
+ end
439
+ new_value
440
+ end
441
+
442
+ def compute(key)
443
+ internal_compute(key) do |old_value|
444
+ if (new_value = yield(NULL == old_value ? nil : old_value)).nil?
445
+ NULL
446
+ else
447
+ new_value
448
+ end
449
+ end
450
+ end
451
+
452
+ def merge_pair(key, value)
453
+ internal_compute(key) do |old_value|
454
+ if NULL == old_value || !(value = yield(old_value)).nil?
455
+ value
456
+ else
457
+ NULL
458
+ end
459
+ end
460
+ end
461
+
462
+ def replace_pair(key, old_value, new_value)
463
+ NULL != internal_replace(key, old_value) { new_value }
464
+ end
465
+
466
+ def replace_if_exists(key, new_value)
467
+ if (result = internal_replace(key) { new_value }) && NULL != result
468
+ result
469
+ end
470
+ end
471
+
472
+ def get_and_set(key, value) # internalPut in the original CHMV8
473
+ hash = key_hash(key)
474
+ current_table = table || initialize_table
475
+ while true
476
+ if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
477
+ if current_table.cas_new_node(i, hash, key, value)
478
+ increment_size
479
+ break
480
+ end
481
+ elsif (node_hash = node.hash) == MOVED
482
+ current_table = node.key
483
+ elsif Node.locked_hash?(node_hash)
484
+ try_await_lock(current_table, i, node)
485
+ else
486
+ succeeded, old_value = attempt_get_and_set(key, value, hash, current_table, i, node, node_hash)
487
+ break old_value if succeeded
488
+ end
489
+ end
490
+ end
491
+
492
+ def delete(key)
493
+ replace_if_exists(key, NULL)
494
+ end
495
+
496
+ def delete_pair(key, value)
497
+ result = internal_replace(key, value) { NULL }
498
+ if result && NULL != result
499
+ !!result
500
+ else
501
+ false
502
+ end
503
+ end
504
+
505
+ def each_pair
506
+ return self unless current_table = table
507
+ current_table_size = base_size = current_table.size
508
+ i = base_index = 0
509
+ while base_index < base_size
510
+ if node = current_table.volatile_get(i)
511
+ if node.hash == MOVED
512
+ current_table = node.key
513
+ current_table_size = current_table.size
514
+ else
515
+ begin
516
+ if NULL != (value = node.value) # skip deleted or special nodes
517
+ yield node.key, value
518
+ end
519
+ end while node = node.next
520
+ end
521
+ end
522
+
523
+ if (i_with_base = i + base_size) < current_table_size
524
+ i = i_with_base # visit upper slots if present
525
+ else
526
+ i = base_index += 1
527
+ end
528
+ end
529
+ self
530
+ end
531
+
532
+ def size
533
+ (sum = @counter.sum) < 0 ? 0 : sum # ignore transient negative values
534
+ end
535
+
536
+ def empty?
537
+ size == 0
538
+ end
539
+
540
+ # Implementation for clear. Steps through each bin, removing all nodes.
541
+ def clear
542
+ return self unless current_table = table
543
+ current_table_size = current_table.size
544
+ deleted_count = i = 0
545
+ while i < current_table_size
546
+ if !(node = current_table.volatile_get(i))
547
+ i += 1
548
+ elsif (node_hash = node.hash) == MOVED
549
+ current_table = node.key
550
+ current_table_size = current_table.size
551
+ elsif Node.locked_hash?(node_hash)
552
+ decrement_size(deleted_count) # opportunistically update count
553
+ deleted_count = 0
554
+ node.try_await_lock(current_table, i)
555
+ else
556
+ current_table.try_lock_via_hash(i, node, node_hash) do
557
+ begin
558
+ deleted_count += 1 if NULL != node.value # recheck under lock
559
+ node.value = nil
560
+ end while node = node.next
561
+ current_table.volatile_set(i, nil)
562
+ i += 1
563
+ end
564
+ end
565
+ end
566
+ decrement_size(deleted_count)
567
+ self
568
+ end
569
+
570
+ private
571
+ # Internal versions of the insertion methods, each a
572
+ # little more complicated than the last. All have
573
+ # the same basic structure:
574
+ # 1. If table uninitialized, create
575
+ # 2. If bin empty, try to CAS new node
576
+ # 3. If bin stale, use new table
577
+ # 4. Lock and validate; if valid, scan and add or update
578
+ #
579
+ # The others interweave other checks and/or alternative actions:
580
+ # * Plain +get_and_set+ checks for and performs resize after insertion.
581
+ # * compute_if_absent prescans for mapping without lock (and fails to add
582
+ # if present), which also makes pre-emptive resize checks worthwhile.
583
+ #
584
+ # Someday when details settle down a bit more, it might be worth
585
+ # some factoring to reduce sprawl.
586
+ def internal_replace(key, expected_old_value = NULL, &block)
587
+ hash = key_hash(key)
588
+ current_table = table
589
+ while current_table
590
+ if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
591
+ break
592
+ elsif (node_hash = node.hash) == MOVED
593
+ current_table = node.key
594
+ elsif (node_hash & HASH_BITS) != hash && !node.next # precheck
595
+ break # rules out possible existence
596
+ elsif Node.locked_hash?(node_hash)
597
+ try_await_lock(current_table, i, node)
598
+ else
599
+ succeeded, old_value = attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash, &block)
600
+ return old_value if succeeded
601
+ end
602
+ end
603
+ NULL
604
+ end
605
+
606
+ def attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash)
607
+ current_table.try_lock_via_hash(i, node, node_hash) do
608
+ predecessor_node = nil
609
+ old_value = NULL
610
+ begin
611
+ if node.matches?(key, hash) && NULL != (current_value = node.value)
612
+ if NULL == expected_old_value || expected_old_value == current_value # NULL == expected_old_value means whatever value
613
+ old_value = current_value
614
+ if NULL == (node.value = yield(old_value))
615
+ current_table.delete_node_at(i, node, predecessor_node)
616
+ decrement_size
617
+ end
618
+ end
619
+ break
620
+ end
621
+
622
+ predecessor_node = node
623
+ end while node = node.next
624
+
625
+ return true, old_value
626
+ end
627
+ end
628
+
629
+ def find_value_in_node_list(node, key, hash, pure_hash)
630
+ do_check_for_resize = false
631
+ while true
632
+ if pure_hash == hash && node.key?(key) && NULL != (value = node.value)
633
+ return value
634
+ elsif node = node.next
635
+ do_check_for_resize = true # at least 2 nodes -> check for resize
636
+ pure_hash = node.pure_hash
637
+ else
638
+ return NULL
639
+ end
640
+ end
641
+ ensure
642
+ check_for_resize if do_check_for_resize
643
+ end
644
+
645
+ def internal_compute(key, &block)
646
+ hash = key_hash(key)
647
+ current_table = table || initialize_table
648
+ while true
649
+ if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
650
+ succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key, &block)
651
+ if succeeded
652
+ if NULL == new_value
653
+ break nil
654
+ else
655
+ increment_size
656
+ break new_value
657
+ end
658
+ end
659
+ elsif (node_hash = node.hash) == MOVED
660
+ current_table = node.key
661
+ elsif Node.locked_hash?(node_hash)
662
+ try_await_lock(current_table, i, node)
663
+ else
664
+ succeeded, new_value = attempt_compute(key, hash, current_table, i, node, node_hash, &block)
665
+ break new_value if succeeded
666
+ end
667
+ end
668
+ end
669
+
670
+ def attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash)
671
+ added = false
672
+ current_table.try_lock_via_hash(i, node, node_hash) do
673
+ while true
674
+ if node.matches?(key, hash) && NULL != (value = node.value)
675
+ return true, value
676
+ end
677
+ last = node
678
+ unless node = node.next
679
+ last.next = Node.new(hash, key, value = yield)
680
+ added = true
681
+ increment_size
682
+ return true, value
683
+ end
684
+ end
685
+ end
686
+ ensure
687
+ check_for_resize if added
688
+ end
689
+
690
+ def attempt_compute(key, hash, current_table, i, node, node_hash)
691
+ added = false
692
+ current_table.try_lock_via_hash(i, node, node_hash) do
693
+ predecessor_node = nil
694
+ while true
695
+ if node.matches?(key, hash) && NULL != (value = node.value)
696
+ if NULL == (node.value = value = yield(value))
697
+ current_table.delete_node_at(i, node, predecessor_node)
698
+ decrement_size
699
+ value = nil
700
+ end
701
+ return true, value
702
+ end
703
+ predecessor_node = node
704
+ unless node = node.next
705
+ if NULL == (value = yield(NULL))
706
+ value = nil
707
+ else
708
+ predecessor_node.next = Node.new(hash, key, value)
709
+ added = true
710
+ increment_size
711
+ end
712
+ return true, value
713
+ end
714
+ end
715
+ end
716
+ ensure
717
+ check_for_resize if added
718
+ end
719
+
720
+ def attempt_get_and_set(key, value, hash, current_table, i, node, node_hash)
721
+ node_nesting = nil
722
+ current_table.try_lock_via_hash(i, node, node_hash) do
723
+ node_nesting = 1
724
+ old_value = nil
725
+ found_old_value = false
726
+ while node
727
+ if node.matches?(key, hash) && NULL != (old_value = node.value)
728
+ found_old_value = true
729
+ node.value = value
730
+ break
731
+ end
732
+ last = node
733
+ unless node = node.next
734
+ last.next = Node.new(hash, key, value)
735
+ break
736
+ end
737
+ node_nesting += 1
738
+ end
739
+
740
+ return true, old_value if found_old_value
741
+ increment_size
742
+ true
743
+ end
744
+ ensure
745
+ check_for_resize if node_nesting && (node_nesting > 1 || current_table.size <= 64)
746
+ end
747
+
748
+ def initialize_copy(other)
749
+ super
750
+ @counter = Concurrent::ThreadSafe::Util::Adder.new
751
+ self.table = nil
752
+ self.size_control = (other_table = other.table) ? other_table.size : DEFAULT_CAPACITY
753
+ self
754
+ end
755
+
756
+ def try_await_lock(current_table, i, node)
757
+ check_for_resize # try resizing if can't get lock
758
+ node.try_await_lock(current_table, i)
759
+ end
760
+
761
+ def key_hash(key)
762
+ key.hash & HASH_BITS
763
+ end
764
+
765
+ # Returns a power of two table size for the given desired capacity.
766
+ def table_size_for(entry_count)
767
+ size = 2
768
+ size <<= 1 while size < entry_count
769
+ size
770
+ end
771
+
772
+ # Initializes table, using the size recorded in +size_control+.
773
+ def initialize_table
774
+ until current_table ||= table
775
+ if (size_ctrl = size_control) == NOW_RESIZING
776
+ Thread.pass # lost initialization race; just spin
777
+ else
778
+ try_in_resize_lock(current_table, size_ctrl) do
779
+ initial_size = size_ctrl > 0 ? size_ctrl : DEFAULT_CAPACITY
780
+ current_table = self.table = Table.new(initial_size)
781
+ initial_size - (initial_size >> 2) # 75% load factor
782
+ end
783
+ end
784
+ end
785
+ current_table
786
+ end
787
+
788
+ # If table is too small and not already resizing, creates next table and
789
+ # transfers bins. Rechecks occupancy after a transfer to see if another
790
+ # resize is already needed because resizings are lagging additions.
791
+ def check_for_resize
792
+ while (current_table = table) && MAX_CAPACITY > (table_size = current_table.size) && NOW_RESIZING != (size_ctrl = size_control) && size_ctrl < @counter.sum
793
+ try_in_resize_lock(current_table, size_ctrl) do
794
+ self.table = rebuild(current_table)
795
+ (table_size << 1) - (table_size >> 1) # 75% load factor
796
+ end
797
+ end
798
+ end
799
+
800
+ def try_in_resize_lock(current_table, size_ctrl)
801
+ if cas_size_control(size_ctrl, NOW_RESIZING)
802
+ begin
803
+ if current_table == table # recheck under lock
804
+ size_ctrl = yield # get new size_control
805
+ end
806
+ ensure
807
+ self.size_control = size_ctrl
808
+ end
809
+ end
810
+ end
811
+
812
+ # Moves and/or copies the nodes in each bin to new table. See above for explanation.
813
+ def rebuild(table)
814
+ old_table_size = table.size
815
+ new_table = table.next_in_size_table
816
+ # puts "#{old_table_size} -> #{new_table.size}"
817
+ forwarder = Node.new(MOVED, new_table, NULL)
818
+ rev_forwarder = nil
819
+ locked_indexes = nil # holds bins to revisit; nil until needed
820
+ locked_arr_idx = 0
821
+ bin = old_table_size - 1
822
+ i = bin
823
+ while true
824
+ if !(node = table.volatile_get(i))
825
+ # no lock needed (or available) if bin >= 0, because we're not popping values from locked_indexes until we've run through the whole table
826
+ redo unless (bin >= 0 ? table.cas(i, nil, forwarder) : lock_and_clean_up_reverse_forwarders(table, old_table_size, new_table, i, forwarder))
827
+ elsif Node.locked_hash?(node_hash = node.hash)
828
+ locked_indexes ||= Array.new
829
+ if bin < 0 && locked_arr_idx > 0
830
+ locked_arr_idx -= 1
831
+ i, locked_indexes[locked_arr_idx] = locked_indexes[locked_arr_idx], i # swap with another bin
832
+ redo
833
+ end
834
+ if bin < 0 || locked_indexes.size >= TRANSFER_BUFFER_SIZE
835
+ node.try_await_lock(table, i) # no other options -- block
836
+ redo
837
+ end
838
+ rev_forwarder ||= Node.new(MOVED, table, NULL)
839
+ redo unless table.volatile_get(i) == node && node.locked? # recheck before adding to list
840
+ locked_indexes << i
841
+ new_table.volatile_set(i, rev_forwarder)
842
+ new_table.volatile_set(i + old_table_size, rev_forwarder)
843
+ else
844
+ redo unless split_old_bin(table, new_table, i, node, node_hash, forwarder)
845
+ end
846
+
847
+ if bin > 0
848
+ i = (bin -= 1)
849
+ elsif locked_indexes && !locked_indexes.empty?
850
+ bin = -1
851
+ i = locked_indexes.pop
852
+ locked_arr_idx = locked_indexes.size - 1
853
+ else
854
+ return new_table
855
+ end
856
+ end
857
+ end
858
+
859
+ def lock_and_clean_up_reverse_forwarders(old_table, old_table_size, new_table, i, forwarder)
860
+ # transiently use a locked forwarding node
861
+ locked_forwarder = Node.new(moved_locked_hash = MOVED | LOCKED, new_table, NULL)
862
+ if old_table.cas(i, nil, locked_forwarder)
863
+ new_table.volatile_set(i, nil) # kill the potential reverse forwarders
864
+ new_table.volatile_set(i + old_table_size, nil) # kill the potential reverse forwarders
865
+ old_table.volatile_set(i, forwarder)
866
+ locked_forwarder.unlock_via_hash(moved_locked_hash, MOVED)
867
+ true
868
+ end
869
+ end
870
+
871
+ # Splits a normal bin with list headed by e into lo and hi parts; installs in given table.
872
+ def split_old_bin(table, new_table, i, node, node_hash, forwarder)
873
+ table.try_lock_via_hash(i, node, node_hash) do
874
+ split_bin(new_table, i, node, node_hash)
875
+ table.volatile_set(i, forwarder)
876
+ end
877
+ end
878
+
879
+ def split_bin(new_table, i, node, node_hash)
880
+ bit = new_table.size >> 1 # bit to split on
881
+ run_bit = node_hash & bit
882
+ last_run = nil
883
+ low = nil
884
+ high = nil
885
+ current_node = node
886
+ # this optimises for the lowest amount of volatile writes and objects created
887
+ while current_node = current_node.next
888
+ unless (b = current_node.hash & bit) == run_bit
889
+ run_bit = b
890
+ last_run = current_node
891
+ end
892
+ end
893
+ if run_bit == 0
894
+ low = last_run
895
+ else
896
+ high = last_run
897
+ end
898
+ current_node = node
899
+ until current_node == last_run
900
+ pure_hash = current_node.pure_hash
901
+ if (pure_hash & bit) == 0
902
+ low = Node.new(pure_hash, current_node.key, current_node.value, low)
903
+ else
904
+ high = Node.new(pure_hash, current_node.key, current_node.value, high)
905
+ end
906
+ current_node = current_node.next
907
+ end
908
+ new_table.volatile_set(i, low)
909
+ new_table.volatile_set(i + bit, high)
910
+ end
911
+
912
+ def increment_size
913
+ @counter.increment
914
+ end
915
+
916
+ def decrement_size(by = 1)
917
+ @counter.add(-by)
918
+ end
919
+ end
920
+ end
921
+ end
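
The backend above supplies the compute/merge/replace family that the new public Concurrent::Map (data/lib/concurrent/map.rb in the file list) is assumed to expose under the same names. A hedged sketch of those operations:

    require 'concurrent'

    counts = Concurrent::Map.new

    # compute_if_absent: the block runs at most once per missing key,
    # even when several threads race on it.
    counts.compute_if_absent(:visits) { 0 }

    # compute / compute_if_present: atomic read-modify-write on one key.
    counts.compute(:visits) { |v| (v || 0) + 1 }
    counts.compute_if_present(:visits) { |v| v + 1 }

    # merge_pair: insert the given value or merge it with the existing one.
    counts.merge_pair(:visits, 10) { |existing| existing + 10 }

    # Conditional replace / delete compare against an expected current value.
    counts.replace_pair(:visits, 12, 100)   # => true only if the current value is 12
    counts.delete_pair(:visits, 999)        # => false, value does not match

    counts.each_pair { |k, v| puts "#{k}: #{v}" }
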