concurrent-ruby 0.9.2 → 1.0.0.pre1

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (77)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +15 -1
  3. data/README.md +67 -68
  4. data/lib/concurrent.rb +14 -1
  5. data/lib/concurrent/array.rb +38 -0
  6. data/lib/concurrent/async.rb +0 -17
  7. data/lib/concurrent/atomic/abstract_thread_local_var.rb +40 -0
  8. data/lib/concurrent/atomic/atomic_boolean.rb +81 -118
  9. data/lib/concurrent/atomic/atomic_fixnum.rb +98 -162
  10. data/lib/concurrent/atomic/atomic_reference.rb +0 -7
  11. data/lib/concurrent/atomic/count_down_latch.rb +62 -103
  12. data/lib/concurrent/atomic/cyclic_barrier.rb +2 -0
  13. data/lib/concurrent/atomic/java_count_down_latch.rb +39 -0
  14. data/lib/concurrent/atomic/java_thread_local_var.rb +50 -0
  15. data/lib/concurrent/atomic/mutex_atomic_boolean.rb +60 -0
  16. data/lib/concurrent/atomic/mutex_atomic_fixnum.rb +91 -0
  17. data/lib/concurrent/atomic/mutex_count_down_latch.rb +43 -0
  18. data/lib/concurrent/atomic/mutex_semaphore.rb +115 -0
  19. data/lib/concurrent/atomic/ruby_thread_local_var.rb +172 -0
  20. data/lib/concurrent/atomic/semaphore.rb +84 -178
  21. data/lib/concurrent/atomic/thread_local_var.rb +63 -294
  22. data/lib/concurrent/atomic_reference/mutex_atomic.rb +14 -8
  23. data/lib/concurrent/atomics.rb +0 -33
  24. data/lib/concurrent/collection/java_non_concurrent_priority_queue.rb +84 -0
  25. data/lib/concurrent/collection/map/atomic_reference_map_backend.rb +921 -0
  26. data/lib/concurrent/collection/map/mri_map_backend.rb +66 -0
  27. data/lib/concurrent/collection/map/non_concurrent_map_backend.rb +142 -0
  28. data/lib/concurrent/collection/map/synchronized_map_backend.rb +86 -0
  29. data/lib/concurrent/collection/non_concurrent_priority_queue.rb +143 -0
  30. data/lib/concurrent/collection/ruby_non_concurrent_priority_queue.rb +150 -0
  31. data/lib/concurrent/concern/logging.rb +1 -1
  32. data/lib/concurrent/concern/obligation.rb +0 -12
  33. data/lib/concurrent/configuration.rb +18 -148
  34. data/lib/concurrent/delay.rb +5 -4
  35. data/lib/concurrent/exchanger.rb +327 -41
  36. data/lib/concurrent/executor/abstract_executor_service.rb +134 -0
  37. data/lib/concurrent/executor/executor.rb +4 -29
  38. data/lib/concurrent/executor/executor_service.rb +23 -359
  39. data/lib/concurrent/executor/immediate_executor.rb +3 -2
  40. data/lib/concurrent/executor/java_executor_service.rb +100 -0
  41. data/lib/concurrent/executor/java_single_thread_executor.rb +3 -2
  42. data/lib/concurrent/executor/java_thread_pool_executor.rb +3 -4
  43. data/lib/concurrent/executor/ruby_executor_service.rb +72 -0
  44. data/lib/concurrent/executor/ruby_single_thread_executor.rb +7 -5
  45. data/lib/concurrent/executor/ruby_thread_pool_executor.rb +3 -11
  46. data/lib/concurrent/executor/safe_task_executor.rb +1 -1
  47. data/lib/concurrent/executor/serial_executor_service.rb +34 -0
  48. data/lib/concurrent/executor/serialized_execution.rb +8 -31
  49. data/lib/concurrent/executor/serialized_execution_delegator.rb +28 -0
  50. data/lib/concurrent/executor/simple_executor_service.rb +1 -10
  51. data/lib/concurrent/executor/timer_set.rb +4 -8
  52. data/lib/concurrent/executors.rb +13 -2
  53. data/lib/concurrent/future.rb +2 -2
  54. data/lib/concurrent/hash.rb +35 -0
  55. data/lib/concurrent/ivar.rb +9 -14
  56. data/lib/concurrent/map.rb +178 -0
  57. data/lib/concurrent/promise.rb +2 -2
  58. data/lib/concurrent/scheduled_task.rb +9 -69
  59. data/lib/concurrent/thread_safe/synchronized_delegator.rb +50 -0
  60. data/lib/concurrent/thread_safe/util.rb +23 -0
  61. data/lib/concurrent/thread_safe/util/adder.rb +71 -0
  62. data/lib/concurrent/thread_safe/util/array_hash_rbx.rb +28 -0
  63. data/lib/concurrent/thread_safe/util/cheap_lockable.rb +115 -0
  64. data/lib/concurrent/thread_safe/util/power_of_two_tuple.rb +37 -0
  65. data/lib/concurrent/thread_safe/util/striped64.rb +236 -0
  66. data/lib/concurrent/thread_safe/util/volatile.rb +73 -0
  67. data/lib/concurrent/thread_safe/util/xor_shift_random.rb +48 -0
  68. data/lib/concurrent/timer_task.rb +3 -3
  69. data/lib/concurrent/tuple.rb +86 -0
  70. data/lib/concurrent/version.rb +2 -2
  71. metadata +37 -10
  72. data/lib/concurrent/atomic/condition.rb +0 -78
  73. data/lib/concurrent/collection/priority_queue.rb +0 -360
  74. data/lib/concurrent/utilities.rb +0 -5
  75. data/lib/concurrent/utility/timeout.rb +0 -39
  76. data/lib/concurrent/utility/timer.rb +0 -26
  77. data/lib/concurrent_ruby.rb +0 -2
data/lib/concurrent/thread_safe/synchronized_delegator.rb
@@ -0,0 +1,50 @@
+ require 'delegate'
+ require 'monitor'
+
+ module Concurrent
+   unless defined?(SynchronizedDelegator)
+
+     # This class provides a trivial way to synchronize all calls to a given object
+     # by wrapping it with a `Delegator` that performs `Monitor#enter/exit` calls
+     # around the delegated `#send`. Example:
+     #
+     #   array = [] # not thread-safe on many impls
+     #   array = SynchronizedDelegator.new([]) # thread-safe
+     #
+     # A simple `Monitor` provides a very coarse-grained way to synchronize a given
+     # object, in that it will cause synchronization for methods that have no need
+     # for it, but this is a trivial way to get thread-safety where none may exist
+     # currently on some implementations.
+     #
+     # This class is currently being considered for inclusion into stdlib, via
+     # https://bugs.ruby-lang.org/issues/8556
+     #
+     # @!visibility private
+     class SynchronizedDelegator < SimpleDelegator
+       def setup
+         @old_abort = Thread.abort_on_exception
+         Thread.abort_on_exception = true
+       end
+
+       def teardown
+         Thread.abort_on_exception = @old_abort
+       end
+
+       def initialize(obj)
+         __setobj__(obj)
+         @monitor = Monitor.new
+       end
+
+       def method_missing(method, *args, &block)
+         monitor = @monitor
+         begin
+           monitor.enter
+           super
+         ensure
+           monitor.exit
+         end
+       end
+
+     end
+   end
+ end
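The wrapper added above can be exercised directly. A minimal usage sketch (assumptions: the gem is installed, the new file can be required by its path, and the thread counts and output shown are illustrative only):

    require 'concurrent/thread_safe/synchronized_delegator'

    # Every delegated call goes through method_missing, which holds the
    # object's Monitor for the duration of the call.
    array = Concurrent::SynchronizedDelegator.new([])

    threads = 2.times.map do
      Thread.new { 1_000.times { array << :item } }
    end
    threads.each(&:join)

    puts array.size # => 2000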
data/lib/concurrent/thread_safe/util.rb
@@ -0,0 +1,23 @@
+ module Concurrent
+
+   # @!visibility private
+   module ThreadSafe
+
+     # @!visibility private
+     module Util
+
+       FIXNUM_BIT_SIZE = (0.size * 8) - 2
+       MAX_INT = (2 ** FIXNUM_BIT_SIZE) - 1
+       CPU_COUNT = 16 # is there a way to determine this?
+     end
+   end
+ end
+
+ require 'concurrent/tuple'
+ require 'concurrent/thread_safe/util/xor_shift_random'
+ require 'concurrent/thread_safe/util/volatile'
+ require 'concurrent/thread_safe/util/striped64'
+ require 'concurrent/thread_safe/util/adder'
+ require 'concurrent/thread_safe/util/cheap_lockable'
+ require 'concurrent/thread_safe/util/power_of_two_tuple'
+ require 'concurrent/thread_safe/util/array_hash_rbx'
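The constants above derive the usable Fixnum width from the pointer size. A quick arithmetic check of those same expressions, assuming a 64-bit MRI where 0.size is 8 bytes:

    fixnum_bit_size = (0.size * 8) - 2   # 64 bits minus the tag and sign bits => 62
    max_int         = (2 ** fixnum_bit_size) - 1

    puts fixnum_bit_size # => 62
    puts max_int         # => 4611686018427387903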
data/lib/concurrent/thread_safe/util/adder.rb
@@ -0,0 +1,71 @@
+ module Concurrent
+
+   # @!visibility private
+   module ThreadSafe
+
+     # @!visibility private
+     module Util
+
+       # A Ruby port of Doug Lea's jsr166e.LongAdder class version 1.8
+       # available in public domain.
+       #
+       # Original source code available here:
+       # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/LongAdder.java?revision=1.8
+       #
+       # One or more variables that together maintain an initially zero
+       # sum. When updates (method +add+) are contended across threads,
+       # the set of variables may grow dynamically to reduce contention.
+       # Method +sum+ returns the current total combined across the
+       # variables maintaining the sum.
+       #
+       # This class is usually preferable to a single +Atomic+ reference when
+       # multiple threads update a common sum that is used for purposes such
+       # as collecting statistics, not for fine-grained synchronization
+       # control. Under low update contention, the two classes have similar
+       # characteristics. But under high contention, expected throughput of
+       # this class is significantly higher, at the expense of higher space
+       # consumption.
+       #
+       # @!visibility private
+       class Adder < Striped64
+         # Adds the given value.
+         def add(x)
+           if (current_cells = cells) || !cas_base_computed {|current_base| current_base + x}
+             was_uncontended = true
+             hash = hash_code
+             unless current_cells && (cell = current_cells.volatile_get_by_hash(hash)) && (was_uncontended = cell.cas_computed {|current_value| current_value + x})
+               retry_update(x, hash, was_uncontended) {|current_value| current_value + x}
+             end
+           end
+         end
+
+         def increment
+           add(1)
+         end
+
+         def decrement
+           add(-1)
+         end
+
+         # Returns the current sum. The returned value is _NOT_ an
+         # atomic snapshot: Invocation in the absence of concurrent
+         # updates returns an accurate result, but concurrent updates that
+         # occur while the sum is being calculated might not be
+         # incorporated.
+         def sum
+           x = base
+           if current_cells = cells
+             current_cells.each do |cell|
+               x += cell.value if cell
+             end
+           end
+           x
+         end
+
+         def reset
+           internal_reset(0)
+         end
+       end
+     end
+   end
+ end
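A hedged sketch of driving the new Adder directly; it is marked @!visibility private and normally sits behind higher-level classes such as Concurrent::Map, and whether a bare require 'concurrent' loads it on every platform is an assumption here:

    require 'concurrent'

    adder = Concurrent::ThreadSafe::Util::Adder.new

    # Contended increments from several threads; under contention the adder
    # grows its internal cell table instead of spinning on a single CAS.
    threads = 4.times.map do
      Thread.new { 10_000.times { adder.increment } }
    end
    threads.each(&:join)

    puts adder.sum # => 40000 once all writers have joined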
data/lib/concurrent/thread_safe/util/array_hash_rbx.rb
@@ -0,0 +1,28 @@
+ module Concurrent
+   module ThreadSafe
+     module Util
+       def self.make_synchronized_on_rbx(klass)
+         klass.class_eval do
+           private
+           def _mon_initialize
+             @_monitor = Monitor.new unless @_monitor # avoid double initialisation
+           end
+
+           def self.allocate
+             obj = super
+             obj.send(:_mon_initialize)
+             obj
+           end
+         end
+
+         klass.superclass.instance_methods(false).each do |method|
+           klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1
+             def #{method}(*args)
+               @_monitor.synchronize { super }
+             end
+           RUBY
+         end
+       end
+     end
+   end
+ end
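make_synchronized_on_rbx rewrites every method inherited from the superclass to run inside a per-object Monitor installed at allocation time. A hand-expanded illustration of the resulting shape, using hypothetical class names (the real helper is only wired up on Rubinius):

    require 'monitor'

    class PlainCounter            # stand-in for a non-thread-safe superclass
      def initialize; @n = 0; end
      def bump; @n += 1; end
      def count; @n; end
    end

    class SynchronizedCounter < PlainCounter
      def self.allocate           # mirrors the generated allocate override
        obj = super
        obj.instance_variable_set(:@_monitor, Monitor.new)
        obj
      end

      def bump                    # mirrors one generated wrapper method
        @_monitor.synchronize { super }
      end

      def count
        @_monitor.synchronize { super }
      end
    end

    c = SynchronizedCounter.new
    2.times.map { Thread.new { 1_000.times { c.bump } } }.each(&:join)
    puts c.count # => 2000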
data/lib/concurrent/thread_safe/util/cheap_lockable.rb
@@ -0,0 +1,115 @@
+ module Concurrent
+
+   # @!visibility private
+   module ThreadSafe
+
+     # @!visibility private
+     module Util
+
+       # Provides the cheapest possible (mainly in terms of memory usage) +Mutex+
+       # with the +ConditionVariable+ bundled in.
+       #
+       # Usage:
+       #   class A
+       #     include CheapLockable
+       #
+       #     def do_exclusively
+       #       cheap_synchronize { yield }
+       #     end
+       #
+       #     def wait_for_something
+       #       cheap_synchronize do
+       #         cheap_wait until resource_available?
+       #         do_something
+       #         cheap_broadcast # wake up others
+       #       end
+       #     end
+       #   end
+       #
+       # @!visibility private
+       module CheapLockable
+         private
+         engine = defined?(RUBY_ENGINE) && RUBY_ENGINE
+         if engine == 'rbx'
+           # Making use of Rubinius' ability to lock via object headers to avoid the overhead of extra Mutex objects.
+           def cheap_synchronize
+             Rubinius.lock(self)
+             begin
+               yield
+             ensure
+               Rubinius.unlock(self)
+             end
+           end
+
+           def cheap_wait
+             wchan = Rubinius::Channel.new
+
+             begin
+               waiters = @waiters ||= []
+               waiters.push wchan
+               Rubinius.unlock(self)
+               signaled = wchan.receive_timeout nil
+             ensure
+               Rubinius.lock(self)
+
+               unless signaled or waiters.delete(wchan)
+                 # we timed out, but got signaled afterwards (e.g. while waiting to
+                 # acquire @lock), so pass that signal on to the next waiter
+                 waiters.shift << true unless waiters.empty?
+               end
+             end
+
+             self
+           end
+
+           def cheap_broadcast
+             waiters = @waiters ||= []
+             waiters.shift << true until waiters.empty?
+             self
+           end
+         elsif engine == 'jruby'
+           # Use Java's native synchronized (this) { wait(); notifyAll(); } to avoid the overhead of extra Mutex objects
+           require 'jruby'
+
+           def cheap_synchronize
+             JRuby.reference0(self).synchronized { yield }
+           end
+
+           def cheap_wait
+             JRuby.reference0(self).wait
+           end
+
+           def cheap_broadcast
+             JRuby.reference0(self).notify_all
+           end
+         else
+           require 'thread'
+
+           extend Volatile
+           attr_volatile :mutex
+
+           # Non-reentrant Mutex#synchronize
+           def cheap_synchronize
+             true until (my_mutex = mutex) || cas_mutex(nil, my_mutex = Mutex.new)
+             my_mutex.synchronize { yield }
+           end
+
+           # Releases this object's +cheap_synchronize+ lock and goes to sleep waiting for other threads to +cheap_broadcast+, reacquires the lock on wakeup.
+           # Must only be called in +cheap_synchronize+'s block.
+           def cheap_wait
+             conditional_variable = @conditional_variable ||= ConditionVariable.new
+             conditional_variable.wait(mutex)
+           end
+
+           # Wakes up all threads waiting for this object's +cheap_synchronize+ lock.
+           # Must only be called in +cheap_synchronize+'s block.
+           def cheap_broadcast
+             if conditional_variable = @conditional_variable
+               conditional_variable.broadcast
+             end
+           end
+         end
+       end
+     end
+   end
+ end
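On engines other than Rubinius and JRuby, CheapLockable falls back to a lazily created Mutex plus ConditionVariable. The wait/broadcast pattern from the module's comment, rendered with those plain stdlib primitives (the names and the ready flag here are illustrative, not part of the gem):

    require 'thread'

    mutex     = Mutex.new
    condition = ConditionVariable.new
    ready     = false

    waiter = Thread.new do
      mutex.synchronize do
        condition.wait(mutex) until ready   # cheap_wait until resource_available?
        puts 'resource is ready'
      end
    end

    mutex.synchronize do
      ready = true
      condition.broadcast                   # cheap_broadcast: wake up others
    end

    waiter.join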
data/lib/concurrent/thread_safe/util/power_of_two_tuple.rb
@@ -0,0 +1,37 @@
+ require 'concurrent/tuple'
+
+ module Concurrent
+
+   # @!visibility private
+   module ThreadSafe
+
+     # @!visibility private
+     module Util
+
+       # @!visibility private
+       class PowerOfTwoTuple < Concurrent::Tuple
+
+         def initialize(size)
+           raise ArgumentError, "size must be a power of 2 (#{size.inspect} provided)" unless size > 0 && size & (size - 1) == 0
+           super(size)
+         end
+
+         def hash_to_index(hash)
+           (size - 1) & hash
+         end
+
+         def volatile_get_by_hash(hash)
+           volatile_get(hash_to_index(hash))
+         end
+
+         def volatile_set_by_hash(hash, value)
+           volatile_set(hash_to_index(hash), value)
+         end
+
+         def next_in_size_table
+           self.class.new(size << 1)
+         end
+       end
+     end
+   end
+ end
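Because the size is forced to a power of two, hash_to_index can mask instead of taking a modulo. A quick check with arbitrarily chosen values:

    size = 8                    # power of two, as the initializer enforces
    hash = 0x9e3779b97f4a7c15   # arbitrary positive hash value

    puts((size - 1) & hash)     # => 5
    puts(hash % size)           # => 5, same slot for non-negative hashes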
data/lib/concurrent/thread_safe/util/striped64.rb
@@ -0,0 +1,236 @@
+ module Concurrent
+
+   # @!visibility private
+   module ThreadSafe
+
+     # @!visibility private
+     module Util
+
+       # A Ruby port of Doug Lea's jsr166e.Striped64 class version 1.6
+       # available in public domain.
+       #
+       # Original source code available here:
+       # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/Striped64.java?revision=1.6
+       #
+       # Class holding common representation and mechanics for classes supporting
+       # dynamic striping on 64-bit values.
+       #
+       # This class maintains a lazily-initialized table of atomically updated
+       # variables, plus an extra +base+ field. The table size is a power of two.
+       # Indexing uses masked per-thread hash codes. Nearly all methods on this
+       # class are private, accessed directly by subclasses.
+       #
+       # Table entries are of class +Cell+; a variant of AtomicLong padded to
+       # reduce cache contention on most processors. Padding is overkill for most
+       # Atomics because they are usually irregularly scattered in memory and thus
+       # don't interfere much with each other. But Atomic objects residing in
+       # arrays will tend to be placed adjacent to each other, and so will most
+       # often share cache lines (with a huge negative performance impact) without
+       # this precaution.
+       #
+       # In part because +Cell+s are relatively large, we avoid creating them until
+       # they are needed. When there is no contention, all updates are made to the
+       # +base+ field. Upon first contention (a failed CAS on +base+ update), the
+       # table is initialized to size 2. The table size is doubled upon further
+       # contention until reaching the nearest power of two greater than or equal
+       # to the number of CPUs. Table slots remain empty (+nil+) until they are
+       # needed.
+       #
+       # A single spinlock (+busy+) is used for initializing and resizing the
+       # table, as well as populating slots with new +Cell+s. There is no need for
+       # a blocking lock: When the lock is not available, threads try other slots
+       # (or the base). During these retries, there is increased contention and
+       # reduced locality, which is still better than alternatives.
+       #
+       # Per-thread hash codes are initialized to random values. Contention and/or
+       # table collisions are indicated by failed CASes when performing an update
+       # operation (see method +retry_update+). Upon a collision, if the table size
+       # is less than the capacity, it is doubled in size unless some other thread
+       # holds the lock. If a hashed slot is empty, and lock is available, a new
+       # +Cell+ is created. Otherwise, if the slot exists, a CAS is tried. Retries
+       # proceed by "double hashing", using a secondary hash (XorShift) to try to
+       # find a free slot.
+       #
+       # The table size is capped because, when there are more threads than CPUs,
+       # supposing that each thread were bound to a CPU, there would exist a
+       # perfect hash function mapping threads to slots that eliminates collisions.
+       # When we reach capacity, we search for this mapping by randomly varying the
+       # hash codes of colliding threads. Because search is random, and collisions
+       # only become known via CAS failures, convergence can be slow, and because
+       # threads are typically not bound to CPUs forever, may not occur at all.
+       # However, despite these limitations, observed contention rates are
+       # typically low in these cases.
+       #
+       # It is possible for a +Cell+ to become unused when threads that once hashed
+       # to it terminate, as well as in the case where doubling the table causes no
+       # thread to hash to it under expanded mask. We do not try to detect or
+       # remove such cells, under the assumption that for long-running instances,
+       # observed contention levels will recur, so the cells will eventually be
+       # needed again; and for short-lived ones, it does not matter.
+       #
+       # @!visibility private
+       class Striped64
+
+         # Padded variant of AtomicLong supporting only raw accesses plus CAS.
+         # The +value+ field is placed between pads, hoping that the JVM doesn't
+         # reorder them.
+         #
+         # Optimisation note: It would be possible to use a release-only
+         # form of CAS here, if it were provided.
+         #
+         # @!visibility private
+         class Cell < Concurrent::AtomicReference
+
+           # TODO: this only adds padding after the :value slot, need to find a way to add padding before the slot
+           # @!visibility private
+           attr_reader *(Array.new(12).map {|i| :"padding_#{i}"})
+
+           alias_method :cas, :compare_and_set
+
+           def cas_computed
+             cas(current_value = value, yield(current_value))
+           end
+         end
+
+         extend Volatile
+         attr_volatile :cells, # Table of cells. When non-null, size is a power of 2.
+                       :base, # Base value, used mainly when there is no contention, but also as a fallback during table initialization races. Updated via CAS.
+                       :busy  # Spinlock (locked via CAS) used when resizing and/or creating Cells.
+
+         alias_method :busy?, :busy
+
+         def initialize
+           super()
+           self.busy = false
+           self.base = 0
+         end
+
+         # Handles cases of updates involving initialization, resizing,
+         # creating new Cells, and/or contention. See above for
+         # explanation. This method suffers the usual non-modularity
+         # problems of optimistic retry code, relying on rechecked sets of
+         # reads.
+         #
+         # Arguments:
+         # [+x+]
+         #   the value
+         # [+hash_code+]
+         #   hash code used
+         # [+was_uncontended+]
+         #   false if CAS failed before call
+         def retry_update(x, hash_code, was_uncontended) # :yields: current_value
+           hash = hash_code
+           collided = false # True if last slot nonempty
+           while true
+             if current_cells = cells
+               if !(cell = current_cells.volatile_get_by_hash(hash))
+                 if busy?
+                   collided = false
+                 else # Try to attach new Cell
+                   if try_to_install_new_cell(Cell.new(x), hash) # Optimistically create and try to insert new cell
+                     break
+                   else
+                     redo # Slot is now non-empty
+                   end
+                 end
+               elsif !was_uncontended # CAS already known to fail
+                 was_uncontended = true # Continue after rehash
+               elsif cell.cas_computed {|current_value| yield current_value}
+                 break
+               elsif current_cells.size >= CPU_COUNT || cells != current_cells # At max size or stale
+                 collided = false
+               elsif collided && expand_table_unless_stale(current_cells)
+                 collided = false
+                 redo # Retry with expanded table
+               else
+                 collided = true
+               end
+               hash = XorShiftRandom.xorshift(hash)
+
+             elsif try_initialize_cells(x, hash) || cas_base_computed {|current_base| yield current_base}
+               break
+             end
+           end
+           self.hash_code = hash
+         end
+
+         private
+         # Static per-thread hash code key. Shared across all instances to
+         # reduce Thread locals pollution and because adjustments due to
+         # collisions in one table are likely to be appropriate for
+         # others.
+         THREAD_LOCAL_KEY = "#{name}.hash_code".to_sym
+
+         # A thread-local hash code accessor. The code is initially
+         # random, but may be set to a different value upon collisions.
+         def hash_code
+           Thread.current[THREAD_LOCAL_KEY] ||= XorShiftRandom.get
+         end
+
+         def hash_code=(hash)
+           Thread.current[THREAD_LOCAL_KEY] = hash
+         end
+
+         # Sets base and all +cells+ to the given value.
+         def internal_reset(initial_value)
+           current_cells = cells
+           self.base = initial_value
+           if current_cells
+             current_cells.each do |cell|
+               cell.value = initial_value if cell
+             end
+           end
+         end
+
+         def cas_base_computed
+           cas_base(current_base = base, yield(current_base))
+         end
+
+         def free?
+           !busy?
+         end
+
+         def try_initialize_cells(x, hash)
+           if free? && !cells
+             try_in_busy do
+               unless cells # Recheck under lock
+                 new_cells = PowerOfTwoTuple.new(2)
+                 new_cells.volatile_set_by_hash(hash, Cell.new(x))
+                 self.cells = new_cells
+               end
+             end
+           end
+         end
+
+         def expand_table_unless_stale(current_cells)
+           try_in_busy do
+             if current_cells == cells # Recheck under lock
+               new_cells = current_cells.next_in_size_table
+               current_cells.each_with_index {|x, i| new_cells.volatile_set(i, x)}
+               self.cells = new_cells
+             end
+           end
+         end
+
+         def try_to_install_new_cell(new_cell, hash)
+           try_in_busy do
+             # Recheck under lock
+             if (current_cells = cells) && !current_cells.volatile_get(i = current_cells.hash_to_index(hash))
+               current_cells.volatile_set(i, new_cell)
+             end
+           end
+         end
+
+         def try_in_busy
+           if cas_busy(false, true)
+             begin
+               yield
+             ensure
+               self.busy = false
+             end
+           end
+         end
+       end
+     end
+   end
+ end
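The essential idea is that the counter's value is the +base+ plus every cell, with contending threads spread across cells by a per-thread hash. A deliberately simplified, lock-based toy of that sum-of-stripes layout (not the CAS/XorShift algorithm above; class and names are hypothetical):

    class ToyStripedCounter
      Cell = Struct.new(:lock, :value)

      def initialize(stripes = 8)           # stripes kept at a power of two
        @cells = Array.new(stripes) { Cell.new(Mutex.new, 0) }
      end

      def add(x)
        # Masked per-thread "hash", as PowerOfTwoTuple#hash_to_index does.
        cell = @cells[Thread.current.object_id & (@cells.size - 1)]
        cell.lock.synchronize { cell.value += x }
      end

      def sum
        @cells.map(&:value).inject(0, :+)   # not an atomic snapshot, like Adder#sum
      end
    end

    counter = ToyStripedCounter.new
    4.times.map { Thread.new { 10_000.times { counter.add(1) } } }.each(&:join)
    puts counter.sum # => 40000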