thread_safe 0.0.3 → 0.1.0

@@ -0,0 +1,12 @@
+ module ThreadSafe
+   module Util
+     # An overhead-less atomic reference.
+     AtomicReference =
+       if defined?(Rubinius::AtomicReference)
+         Rubinius::AtomicReference
+       else
+         require 'atomic'
+         defined?(Atomic::InternalReference) ? Atomic::InternalReference : Atomic
+       end
+   end
+ end
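
Whichever branch wins, the resolved constant answers the same minimal protocol: +get+, +set+, and +compare_and_set+. A quick hedged sketch of that shared surface (assuming the atomic gem's API, which all three candidates follow):

ref = ThreadSafe::Util::AtomicReference.new(0)
ref.get                   # => 0
ref.set(1)                # plain volatile write
ref.compare_and_set(1, 2) # => true, swaps only if the current value is still 1
ref.compare_and_set(1, 3) # => false, the value already moved on to 2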
@@ -0,0 +1,105 @@
+ module ThreadSafe
+   module Util
+     # Provides the cheapest possible (mainly in terms of memory usage) +Mutex+ with a +ConditionVariable+ bundled in.
+     #
+     # Usage:
+     #   class A
+     #     include CheapLockable
+     #
+     #     def do_exclusively
+     #       cheap_synchronize { yield }
+     #     end
+     #
+     #     def wait_for_something
+     #       cheap_synchronize do
+     #         cheap_wait until resource_available?
+     #         do_something
+     #         cheap_broadcast # wake up others
+     #       end
+     #     end
+     #   end
+     module CheapLockable
+       private
+       engine = defined?(RUBY_ENGINE) && RUBY_ENGINE
+       if engine == 'rbx'
+         # Make use of Rubinius' ability to lock via object headers to avoid the overhead of extra Mutex objects.
+         def cheap_synchronize
+           Rubinius.lock(self)
+           begin
+             yield
+           ensure
+             Rubinius.unlock(self)
+           end
+         end
+
+         def cheap_wait
+           wchan = Rubinius::Channel.new
+
+           begin
+             waiters = @waiters ||= []
+             waiters.push wchan
+             Rubinius.unlock(self)
+             signaled = wchan.receive_timeout nil
+           ensure
+             Rubinius.lock(self)
+
+             unless signaled or waiters.delete(wchan)
+               # we timed out, but got signaled afterwards (e.g. while waiting to
+               # reacquire the lock), so pass that signal on to the next waiter
+               waiters.shift << true unless waiters.empty?
+             end
+           end
+
+           self
+         end
+
+         def cheap_broadcast
+           waiters = @waiters ||= []
+           waiters.shift << true until waiters.empty?
+           self
+         end
+       elsif engine == 'jruby'
+         # Use Java's native synchronized(this) { wait(); notifyAll(); } to avoid the overhead of extra Mutex objects
+         require 'jruby'
+
+         def cheap_synchronize
+           JRuby.reference0(self).synchronized { yield }
+         end
+
+         def cheap_wait
+           JRuby.reference0(self).wait
+         end
+
+         def cheap_broadcast
+           JRuby.reference0(self).notify_all
+         end
+       else
+         require 'thread'
+
+         extend Volatile
+         attr_volatile :mutex
+
+         # A non-reentrant Mutex#synchronize
+         def cheap_synchronize
+           true until (my_mutex = mutex) || cas_mutex(nil, my_mutex = Mutex.new)
+           my_mutex.synchronize { yield }
+         end
+
+         # Releases this object's +cheap_synchronize+ lock and goes to sleep waiting for other threads to +cheap_broadcast+, reacquires the lock on wakeup.
+         # Must only be called in +cheap_synchronize+'s block.
+         def cheap_wait
+           conditional_variable = @conditional_variable ||= ConditionVariable.new
+           conditional_variable.wait(mutex)
+         end
+
+         # Wakes up all threads waiting for this object's +cheap_synchronize+ lock.
+         # Must only be called in +cheap_synchronize+'s block.
+         def cheap_broadcast
+           if conditional_variable = @conditional_variable
+             conditional_variable.broadcast
+           end
+         end
+       end
+     end
+   end
+ end
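
All three implementations honor the same contract as a Mutex paired with a ConditionVariable: +cheap_wait+ releases the lock while sleeping and reacquires it on wakeup. A hedged sketch of a one-slot mailbox built on that contract (the Mailbox class is illustrative, not part of the gem):

class Mailbox
  include ThreadSafe::Util::CheapLockable

  def put(message)
    cheap_synchronize do
      cheap_wait until @message.nil? # sleep while the slot is still full
      @message = message
      cheap_broadcast                # wake up waiting readers
    end
  end

  def take
    cheap_synchronize do
      cheap_wait while @message.nil? # sleep until a message arrives
      message, @message = @message, nil
      cheap_broadcast                # wake up waiting writers
      message
    end
  end
end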
@@ -0,0 +1,26 @@
+ module ThreadSafe
+   module Util
+     class PowerOfTwoTuple < VolatileTuple
+       def initialize(size)
+         raise ArgumentError, "size must be a power of 2 (#{size.inspect} provided)" unless size > 0 && size & (size - 1) == 0
+         super(size)
+       end
+
+       def hash_to_index(hash)
+         (size - 1) & hash
+       end
+
+       def volatile_get_by_hash(hash)
+         volatile_get(hash_to_index(hash))
+       end
+
+       def volatile_set_by_hash(hash, value)
+         volatile_set(hash_to_index(hash), value)
+       end
+
+       def next_in_size_table
+         self.class.new(size << 1)
+       end
+     end
+   end
+ end
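
Because the constructor rejects any size that is not a power of two, +hash_to_index+ can reduce a hash with a single mask instead of a modulo: for size 8, +(size - 1) & hash+ is just +hash & 7+. An illustrative check of the arithmetic:

tuple = ThreadSafe::Util::PowerOfTwoTuple.new(8)
tuple.hash_to_index(13)       # => 5 (13 & 7, same as 13 % 8)
tuple.next_in_size_table.size # => 16, resizing always doubles
ThreadSafe::Util::PowerOfTwoTuple.new(6) # raises ArgumentError: size must be a power of 2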
@@ -0,0 +1,226 @@
+ module ThreadSafe
+   module Util
+     # A Ruby port of Doug Lea's jsr166e.Striped64 class version 1.6, available in the public domain.
+     # Original source code available here: http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/Striped64.java?revision=1.6
+     #
+     # Class holding common representation and mechanics for classes supporting dynamic striping on 64-bit values.
+     #
+     # This class maintains a lazily-initialized table of atomically
+     # updated variables, plus an extra +base+ field. The table size
+     # is a power of two. Indexing uses masked per-thread hash codes.
+     # Nearly all methods on this class are private, accessed directly
+     # by subclasses.
+     #
+     # Table entries are of class +Cell+, a variant of AtomicLong padded
+     # to reduce cache contention on most processors. Padding is
+     # overkill for most Atomics because they are usually irregularly
+     # scattered in memory and thus don't interfere much with each
+     # other. But Atomic objects residing in arrays will tend to be
+     # placed adjacent to each other, and so will most often share
+     # cache lines (with a huge negative performance impact) without
+     # this precaution.
+     #
+     # In part because +Cell+s are relatively large, we avoid creating
+     # them until they are needed. When there is no contention, all
+     # updates are made to the +base+ field. Upon first contention (a
+     # failed CAS on a +base+ update), the table is initialized to size 2.
+     # The table size is doubled upon further contention until
+     # reaching the nearest power of two greater than or equal to the
+     # number of CPUs. Table slots remain empty (+nil+) until they are
+     # needed.
+     #
+     # A single spinlock (+busy+) is used for initializing and
+     # resizing the table, as well as populating slots with new +Cell+s.
+     # There is no need for a blocking lock: when the lock is not
+     # available, threads try other slots (or the base). During these
+     # retries, there is increased contention and reduced locality,
+     # which is still better than the alternatives.
+     #
+     # Per-thread hash codes are initialized to random values.
+     # Contention and/or table collisions are indicated by failed
+     # CASes when performing an update operation (see method
+     # +retry_update+). Upon a collision, if the table size is less than
+     # the capacity, it is doubled in size unless some other thread
+     # holds the lock. If a hashed slot is empty, and the lock is
+     # available, a new +Cell+ is created. Otherwise, if the slot
+     # exists, a CAS is tried. Retries proceed by "double hashing",
+     # using a secondary hash (XorShift) to try to find a
+     # free slot.
+     #
+     # The table size is capped because, when there are more threads
+     # than CPUs, supposing that each thread were bound to a CPU,
+     # there would exist a perfect hash function mapping threads to
+     # slots that eliminates collisions. When we reach capacity, we
+     # search for this mapping by randomly varying the hash codes of
+     # colliding threads. Because search is random, and collisions
+     # only become known via CAS failures, convergence can be slow,
+     # and because threads are typically not bound to CPUs forever,
+     # may not occur at all. However, despite these limitations,
+     # observed contention rates are typically low in these cases.
+     #
+     # It is possible for a +Cell+ to become unused when threads that
+     # once hashed to it terminate, as well as in the case where
+     # doubling the table causes no thread to hash to it under the
+     # expanded mask. We do not try to detect or remove such cells,
+     # under the assumption that for long-running instances, observed
+     # contention levels will recur, so the cells will eventually be
+     # needed again; and for short-lived ones, it does not matter.
+     class Striped64
+       # Padded variant of AtomicLong supporting only raw accesses plus CAS.
+       # The +value+ field is placed between pads, hoping that the JVM doesn't
+       # reorder them.
+       #
+       # Optimisation note: It would be possible to use a release-only
+       # form of CAS here, if it were provided.
+       class Cell < AtomicReference
+         # TODO: this only adds padding after the :value slot, need to find a way to add padding before the slot
+         attr_reader *(Array.new(12).map {|i| :"padding_#{i}"})
+
+         alias_method :cas, :compare_and_set
+
+         def cas_computed
+           cas(current_value = value, yield(current_value))
+         end
+       end
+
+       extend Volatile
+       attr_volatile :cells, # Table of cells. When non-null, size is a power of 2.
+                     :base,  # Base value, used mainly when there is no contention, but also as a fallback during table initialization races. Updated via CAS.
+                     :busy   # Spinlock (locked via CAS) used when resizing and/or creating Cells.
+
+       alias_method :busy?, :busy
+
+       def initialize
+         super()
+         self.busy = false
+         self.base = 0
+       end
+
+       # Handles cases of updates involving initialization, resizing,
+       # creating new Cells, and/or contention. See above for
+       # explanation. This method suffers the usual non-modularity
+       # problems of optimistic retry code, relying on rechecked sets of
+       # reads.
+       #
+       # Arguments:
+       # [+x+]
+       #   the value
+       # [+hash_code+]
+       #   the hash code used
+       # [+was_uncontended+]
+       #   false if a CAS failed before this call
+       def retry_update(x, hash_code, was_uncontended) # :yields: current_value
+         hash = hash_code
+         collided = false # True if last slot nonempty
+         while true
+           if current_cells = cells
+             if !(cell = current_cells.volatile_get_by_hash(hash))
+               if busy?
+                 collided = false
+               else # Try to attach new Cell
+                 if try_to_install_new_cell(Cell.new(x), hash) # Optimistically create and try to insert new cell
+                   break
+                 else
+                   redo # Slot is now non-empty
+                 end
+               end
+             elsif !was_uncontended # CAS already known to fail
+               was_uncontended = true # Continue after rehash
+             elsif cell.cas_computed {|current_value| yield current_value}
+               break
+             elsif current_cells.size >= CPU_COUNT || cells != current_cells # At max size or stale
+               collided = false
+             elsif collided && expand_table_unless_stale(current_cells)
+               collided = false
+               redo # Retry with expanded table
+             else
+               collided = true
+             end
+             hash = XorShiftRandom.xorshift(hash)
+
+           elsif try_initialize_cells(x, hash) || cas_base_computed {|current_base| yield current_base}
+             break
+           end
+         end
+         self.hash_code = hash
+       end
+
+       private
+       # Static per-thread hash code key. Shared across all instances to
+       # reduce Thread locals pollution and because adjustments due to
+       # collisions in one table are likely to be appropriate for
+       # others.
+       THREAD_LOCAL_KEY = "#{name}.hash_code".to_sym
+
+       # A thread-local hash code accessor. The code is initially
+       # random, but may be set to a different value upon collisions.
+       def hash_code
+         Thread.current[THREAD_LOCAL_KEY] ||= XorShiftRandom.get
+       end
+
+       def hash_code=(hash)
+         Thread.current[THREAD_LOCAL_KEY] = hash
+       end
+
+       # Sets base and all +cells+ to the given value.
+       def internal_reset(initial_value)
+         current_cells = cells
+         self.base = initial_value
+         if current_cells
+           current_cells.each do |cell|
+             cell.value = initial_value if cell
+           end
+         end
+       end
+
+       def cas_base_computed
+         cas_base(current_base = base, yield(current_base))
+       end
+
+       def free?
+         !busy?
+       end
+
+       def try_initialize_cells(x, hash)
+         if free? && !cells
+           try_in_busy do
+             unless cells # Recheck under lock
+               new_cells = PowerOfTwoTuple.new(2)
+               new_cells.volatile_set_by_hash(hash, Cell.new(x))
+               self.cells = new_cells
+             end
+           end
+         end
+       end
+
+       def expand_table_unless_stale(current_cells)
+         try_in_busy do
+           if current_cells == cells # Recheck under lock
+             new_cells = current_cells.next_in_size_table
+             current_cells.each_with_index {|x, i| new_cells.volatile_set(i, x)}
+             self.cells = new_cells
+           end
+         end
+       end
+
+       def try_to_install_new_cell(new_cell, hash)
+         try_in_busy do
+           # Recheck under lock
+           if (current_cells = cells) && !current_cells.volatile_get(i = current_cells.hash_to_index(hash))
+             current_cells.volatile_set(i, new_cell)
+           end
+         end
+       end
+
+       def try_in_busy
+         if cas_busy(false, true)
+           begin
+             yield
+           ensure
+             self.busy = false
+           end
+         end
+       end
+     end
+   end
+ end
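
What the machinery buys: an uncontended update is a single CAS on +base+; only under contention do +Cell+s appear and absorb the traffic. A hedged sketch of a striped counter subclass, modeled on jsr166e's LongAdder (the +SumAdder+ name is illustrative; in the gem itself this role is played by its Adder):

class SumAdder < ThreadSafe::Util::Striped64
  def add(x)
    # Fast path: no cell table yet and the CAS on base succeeds.
    if (current_cells = cells) || !cas_base_computed {|current_base| current_base + x}
      was_uncontended = true
      hash = hash_code
      # Try this thread's own cell once before falling into the slow path.
      unless current_cells && (cell = current_cells.volatile_get_by_hash(hash)) &&
             (was_uncontended = cell.cas_computed {|current_value| current_value + x})
        retry_update(x, hash, was_uncontended) {|current_value| current_value + x}
      end
    end
  end

  def sum # NOTE: a snapshot only, not atomic with respect to concurrent adds
    x = base
    if current_cells = cells
      current_cells.each {|cell| x += cell.value if cell}
    end
    x
  end
end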
@@ -0,0 +1,62 @@
+ module ThreadSafe
+   module Util
+     module Volatile
+       # Provides +volatile+ (in the JVM's sense) attribute accessors implemented atop +AtomicReference+s.
+       #
+       # Usage:
+       #   class Foo
+       #     extend ThreadSafe::Util::Volatile
+       #     attr_volatile :foo, :bar
+       #
+       #     def initialize(bar)
+       #       super() # must super() into parent initializers before using the volatile attribute accessors
+       #       self.bar = bar
+       #     end
+       #
+       #     def hello
+       #       my_foo = foo # volatile read
+       #       self.foo = 1 # volatile write
+       #       cas_foo(1, 2) # => true | a strong CAS
+       #     end
+       #   end
+       def attr_volatile(*attr_names)
+         return if attr_names.empty?
+         include(Module.new do
+           atomic_ref_setup = attr_names.map {|attr_name| "@__#{attr_name} = ThreadSafe::Util::AtomicReference.new"}
+           initialize_copy_setup = attr_names.zip(atomic_ref_setup).map do |attr_name, ref_setup|
+             "#{ref_setup}(other.instance_variable_get(:@__#{attr_name}).get)"
+           end
+           class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1
+             def initialize(*)
+               super
+               #{atomic_ref_setup.join('; ')}
+             end
+
+             def initialize_copy(other)
+               super
+               #{initialize_copy_setup.join('; ')}
+             end
+           RUBY_EVAL
+
+           attr_names.each do |attr_name|
+             class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1
+               def #{attr_name}
+                 @__#{attr_name}.get
+               end
+
+               def #{attr_name}=(value)
+                 @__#{attr_name}.set(value)
+               end
+
+               def compare_and_set_#{attr_name}(old_value, new_value)
+                 @__#{attr_name}.compare_and_set(old_value, new_value)
+               end
+             RUBY_EVAL
+
+             alias_method :"cas_#{attr_name}", :"compare_and_set_#{attr_name}"
+             alias_method :"lazy_set_#{attr_name}", :"#{attr_name}="
+           end
+         end)
+       end
+     end
+   end
+ end
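
Each name handed to +attr_volatile+ yields a volatile reader, a volatile writer, +compare_and_set_<name>+, plus the +cas_<name>+ and +lazy_set_<name>+ aliases. A small hedged example of driving the generated CAS in a retry loop (the Account class is hypothetical):

class Account
  extend ThreadSafe::Util::Volatile
  attr_volatile :balance

  def initialize
    super() # runs the generated initializer that allocates the AtomicReference
    self.balance = 0
  end

  # Lock-free deposit: retry the CAS until no other thread races us.
  def deposit(amount)
    loop do
      current = balance # volatile read
      return current + amount if cas_balance(current, current + amount)
    end
  end
end

account = Account.new
4.times.map { Thread.new { 250.times { account.deposit(1) } } }.each(&:join)
account.balance # => 1000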
@@ -0,0 +1,46 @@
+ module ThreadSafe
+   module Util
+     # A fixed-size array with volatile getters/setters.
+     # Usage:
+     #   arr = VolatileTuple.new(16)
+     #   arr.volatile_set(0, :foo)
+     #   arr.volatile_get(0) # => :foo
+     #   arr.cas(0, :foo, :bar) # => true
+     #   arr.volatile_get(0) # => :bar
+     class VolatileTuple
+       include Enumerable
+
+       Tuple = defined?(Rubinius::Tuple) ? Rubinius::Tuple : Array
+
+       def initialize(size)
+         @tuple = tuple = Tuple.new(size)
+         i = 0
+         while i < size
+           tuple[i] = AtomicReference.new
+           i += 1
+         end
+       end
+
+       def volatile_get(i)
+         @tuple[i].get
+       end
+
+       def volatile_set(i, value)
+         @tuple[i].set(value)
+       end
+
+       def compare_and_set(i, old_value, new_value)
+         @tuple[i].compare_and_set(old_value, new_value)
+       end
+       alias_method :cas, :compare_and_set
+
+       def size
+         @tuple.size
+       end
+
+       def each
+         @tuple.each {|ref| yield ref.get}
+       end
+     end
+   end
+ end
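
Since +Enumerable+ is included and +each+ yields the dereferenced slot values, the usual Enumerable helpers work over the current contents (each slot read is individually volatile; the iteration as a whole is not atomic). A short illustrative run:

tuple = ThreadSafe::Util::VolatileTuple.new(4)
4.times {|i| tuple.volatile_set(i, i * i)}
tuple.to_a          # => [0, 1, 4, 9]
tuple.cas(2, 4, 42) # => true, slot 2 still held 4
tuple.max           # => 42
tuple.size          # => 4, fixed at construction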