thread_safe 0.1.1-java
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +21 -0
- data/Gemfile +4 -0
- data/LICENSE +144 -0
- data/README.md +34 -0
- data/Rakefile +36 -0
- data/examples/bench_cache.rb +35 -0
- data/ext/org/jruby/ext/thread_safe/JRubyCacheBackendLibrary.java +200 -0
- data/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMapV8.java +3842 -0
- data/ext/org/jruby/ext/thread_safe/jsr166e/LongAdder.java +204 -0
- data/ext/org/jruby/ext/thread_safe/jsr166e/Striped64.java +342 -0
- data/ext/org/jruby/ext/thread_safe/jsr166y/ThreadLocalRandom.java +199 -0
- data/ext/thread_safe/JrubyCacheBackendService.java +15 -0
- data/lib/thread_safe.rb +65 -0
- data/lib/thread_safe/atomic_reference_cache_backend.rb +922 -0
- data/lib/thread_safe/cache.rb +137 -0
- data/lib/thread_safe/mri_cache_backend.rb +62 -0
- data/lib/thread_safe/non_concurrent_cache_backend.rb +133 -0
- data/lib/thread_safe/synchronized_cache_backend.rb +76 -0
- data/lib/thread_safe/synchronized_delegator.rb +35 -0
- data/lib/thread_safe/util.rb +16 -0
- data/lib/thread_safe/util/adder.rb +59 -0
- data/lib/thread_safe/util/atomic_reference.rb +12 -0
- data/lib/thread_safe/util/cheap_lockable.rb +105 -0
- data/lib/thread_safe/util/power_of_two_tuple.rb +26 -0
- data/lib/thread_safe/util/striped64.rb +226 -0
- data/lib/thread_safe/util/volatile.rb +62 -0
- data/lib/thread_safe/util/volatile_tuple.rb +46 -0
- data/lib/thread_safe/util/xor_shift_random.rb +39 -0
- data/lib/thread_safe/version.rb +3 -0
- data/test/test_array.rb +20 -0
- data/test/test_cache.rb +792 -0
- data/test/test_cache_loops.rb +453 -0
- data/test/test_hash.rb +20 -0
- data/test/test_helper.rb +73 -0
- data/test/test_synchronized_delegator.rb +42 -0
- data/thread_safe.gemspec +21 -0
- metadata +100 -0
data/lib/thread_safe/util/adder.rb
@@ -0,0 +1,59 @@
+module ThreadSafe
+  module Util
+    # A Ruby port of Doug Lea's jsr166e.LongAdder class version 1.8 available in public domain.
+    # Original source code available here: http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/LongAdder.java?revision=1.8
+    #
+    # One or more variables that together maintain an initially zero
+    # sum. When updates (method +add+) are contended across threads,
+    # the set of variables may grow dynamically to reduce contention.
+    # Method +sum+ returns the current total combined across the
+    # variables maintaining the sum.
+    #
+    # This class is usually preferable to a single +Atomic+ reference when
+    # multiple threads update a common sum that is used for purposes such
+    # as collecting statistics, not for fine-grained synchronization
+    # control. Under low update contention, the two classes have similar
+    # characteristics. But under high contention, expected throughput of
+    # this class is significantly higher, at the expense of higher space
+    # consumption.
+    class Adder < Striped64
+      # Adds the given value.
+      def add(x)
+        if (current_cells = cells) || !cas_base_computed {|current_base| current_base + x}
+          was_uncontended = true
+          hash = hash_code
+          unless current_cells && (cell = current_cells.volatile_get_by_hash(hash)) && (was_uncontended = cell.cas_computed {|current_value| current_value + x})
+            retry_update(x, hash, was_uncontended) {|current_value| current_value + x}
+          end
+        end
+      end
+
+      def increment
+        add(1)
+      end
+
+      def decrement
+        add(-1)
+      end
+
+      # Returns the current sum. The returned value is _NOT_ an
+      # atomic snapshot: Invocation in the absence of concurrent
+      # updates returns an accurate result, but concurrent updates that
+      # occur while the sum is being calculated might not be
+      # incorporated.
+      def sum
+        x = base
+        if current_cells = cells
+          current_cells.each do |cell|
+            x += cell.value if cell
+          end
+        end
+        x
+      end
+
+      def reset
+        internal_reset(0)
+      end
+    end
+  end
+end
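To make the mechanics above concrete, here is a minimal usage sketch of +Adder+ (an editorial example, not part of the diff; it assumes these internal +Util+ classes are loadable in your environment via the gem's own requires):

    require 'thread_safe' # assumed to make ThreadSafe::Util::Adder loadable

    adder = ThreadSafe::Util::Adder.new
    threads = 10.times.map do
      Thread.new { 1_000.times { adder.increment } } # contended adds spill from base into cells
    end
    threads.each(&:join)

    adder.sum   # => 10000; accurate here because all updates have completed
    adder.reset # sets base and every cell back to 0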
data/lib/thread_safe/util/atomic_reference.rb
@@ -0,0 +1,12 @@
+module ThreadSafe
+  module Util
+    # An overhead-less atomic reference.
+    AtomicReference =
+      if defined?(Rubinius::AtomicReference)
+        Rubinius::AtomicReference
+      else
+        require 'atomic'
+        defined?(Atomic::InternalReference) ? Atomic::InternalReference : Atomic
+      end
+  end
+end
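Whichever implementation is selected, the rest of the library relies only on the +get+/+set+/+compare_and_set+ contract (this is exactly what the +Volatile+ module and +Striped64+ below call). A quick sketch of that contract:

    ref = ThreadSafe::Util::AtomicReference.new(0)
    ref.get                   # => 0 (volatile read)
    ref.compare_and_set(0, 1) # => true; the value was 0, so it is atomically swapped to 1
    ref.compare_and_set(0, 2) # => false; the value is no longer 0, nothing changes
    ref.set(2)                # unconditional volatile write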
data/lib/thread_safe/util/cheap_lockable.rb
@@ -0,0 +1,105 @@
+module ThreadSafe
+  module Util
+    # Provides the cheapest possible (mainly in terms of memory usage) +Mutex+ with a +ConditionVariable+ bundled in.
+    #
+    # Usage:
+    #   class A
+    #     include CheapLockable
+    #
+    #     def do_exclusively
+    #       cheap_synchronize { yield }
+    #     end
+    #
+    #     def wait_for_something
+    #       cheap_synchronize do
+    #         cheap_wait until resource_available?
+    #         do_something
+    #         cheap_broadcast # wake up others
+    #       end
+    #     end
+    #   end
+    module CheapLockable
+      private
+      engine = defined?(RUBY_ENGINE) && RUBY_ENGINE
+      if engine == 'rbx'
+        # Making use of Rubinius' ability to lock via object headers to avoid the overhead of the extra Mutex objects.
+        def cheap_synchronize
+          Rubinius.lock(self)
+          begin
+            yield
+          ensure
+            Rubinius.unlock(self)
+          end
+        end
+
+        def cheap_wait
+          wchan = Rubinius::Channel.new
+
+          begin
+            waiters = @waiters ||= []
+            waiters.push wchan
+            Rubinius.unlock(self)
+            signaled = wchan.receive_timeout nil
+          ensure
+            Rubinius.lock(self)
+
+            unless signaled or waiters.delete(wchan)
+              # we timed out, but got signaled afterwards (e.g. while waiting to
+              # acquire @lock), so pass that signal on to the next waiter
+              waiters.shift << true unless waiters.empty?
+            end
+          end
+
+          self
+        end
+
+        def cheap_broadcast
+          waiters = @waiters ||= []
+          waiters.shift << true until waiters.empty?
+          self
+        end
+      elsif engine == 'jruby'
+        # Use Java's native synchronized(this) { wait(); notifyAll(); } to avoid the overhead of the extra Mutex objects
+        require 'jruby'
+
+        def cheap_synchronize
+          JRuby.reference0(self).synchronized { yield }
+        end
+
+        def cheap_wait
+          JRuby.reference0(self).wait
+        end
+
+        def cheap_broadcast
+          JRuby.reference0(self).notify_all
+        end
+      else
+        require 'thread'
+
+        extend Volatile
+        attr_volatile :mutex
+
+        # Non-reentrant Mutex#synchronize
+        def cheap_synchronize
+          true until (my_mutex = mutex) || cas_mutex(nil, my_mutex = Mutex.new)
+          my_mutex.synchronize { yield }
+        end
+
+        # Releases this object's +cheap_synchronize+ lock and goes to sleep waiting for other threads to +cheap_broadcast+, reacquires the lock on wakeup.
+        # Must only be called inside +cheap_synchronize+'s block.
+        def cheap_wait
+          conditional_variable = @conditional_variable ||= ConditionVariable.new
+          conditional_variable.wait(mutex)
+        end
+
+        # Wakes up all threads waiting for this object's +cheap_synchronize+ lock.
+        # Must only be called inside +cheap_synchronize+'s block.
+        def cheap_broadcast
+          if conditional_variable = @conditional_variable
+            conditional_variable.broadcast
+          end
+        end
+      end
+    end
+  end
+end
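Beyond the usage comment above, a one-shot latch is a compact illustration of the wait/broadcast pairing (the +Latch+ class is a hypothetical editorial example, not part of the gem):

    class Latch
      include ThreadSafe::Util::CheapLockable

      def initialize
        super() # lets module initializers run (e.g. the MRI branch's volatile @mutex setup)
        @released = false
      end

      def release
        cheap_synchronize do
          @released = true
          cheap_broadcast # wake every thread parked in cheap_wait
        end
      end

      def await
        # re-check the predicate after each wakeup, guarding against spurious wakeups
        cheap_synchronize { cheap_wait until @released }
      end
    end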
data/lib/thread_safe/util/power_of_two_tuple.rb
@@ -0,0 +1,26 @@
+module ThreadSafe
+  module Util
+    class PowerOfTwoTuple < VolatileTuple
+      def initialize(size)
+        raise ArgumentError, "size must be a power of 2 (#{size.inspect} provided)" unless size > 0 && size & (size - 1) == 0
+        super(size)
+      end
+
+      def hash_to_index(hash)
+        (size - 1) & hash
+      end
+
+      def volatile_get_by_hash(hash)
+        volatile_get(hash_to_index(hash))
+      end
+
+      def volatile_set_by_hash(hash, value)
+        volatile_set(hash_to_index(hash), value)
+      end
+
+      def next_in_size_table
+        self.class.new(size << 1)
+      end
+    end
+  end
+end
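Because +size+ is a power of two, +size - 1+ is an all-ones bit mask, so +hash_to_index+ is a single AND rather than a modulo. Illustrative values:

    tuple = ThreadSafe::Util::PowerOfTwoTuple.new(8)
    # size - 1 == 7 == 0b111, so only the low 3 bits of the hash survive
    tuple.hash_to_index(0b10110101) # => 5 (0b101)
    tuple.next_in_size_table.size   # => 16; doubling preserves the power-of-two invariant
    # PowerOfTwoTuple.new(6) would raise ArgumentError: size must be a power of 2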
data/lib/thread_safe/util/striped64.rb
@@ -0,0 +1,226 @@
+module ThreadSafe
+  module Util
+    # A Ruby port of Doug Lea's jsr166e.Striped64 class version 1.6 available in public domain.
+    # Original source code available here: http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/Striped64.java?revision=1.6
+    #
+    # Class holding common representation and mechanics for classes supporting dynamic striping on 64-bit values.
+    #
+    # This class maintains a lazily-initialized table of atomically
+    # updated variables, plus an extra +base+ field. The table size
+    # is a power of two. Indexing uses masked per-thread hash codes.
+    # Nearly all methods on this class are private, accessed directly
+    # by subclasses.
+    #
+    # Table entries are of class +Cell+; a variant of AtomicLong padded
+    # to reduce cache contention on most processors. Padding is
+    # overkill for most Atomics because they are usually irregularly
+    # scattered in memory and thus don't interfere much with each
+    # other. But Atomic objects residing in arrays will tend to be
+    # placed adjacent to each other, and so will most often share
+    # cache lines (with a huge negative performance impact) without
+    # this precaution.
+    #
+    # In part because +Cell+s are relatively large, we avoid creating
+    # them until they are needed. When there is no contention, all
+    # updates are made to the +base+ field. Upon first contention (a
+    # failed CAS on +base+ update), the table is initialized to size 2.
+    # The table size is doubled upon further contention until
+    # reaching the nearest power of two greater than or equal to the
+    # number of CPUs. Table slots remain empty (+nil+) until they are
+    # needed.
+    #
+    # A single spinlock (+busy+) is used for initializing and
+    # resizing the table, as well as populating slots with new +Cell+s.
+    # There is no need for a blocking lock: When the lock is not
+    # available, threads try other slots (or the base). During these
+    # retries, there is increased contention and reduced locality,
+    # which is still better than alternatives.
+    #
+    # Per-thread hash codes are initialized to random values.
+    # Contention and/or table collisions are indicated by failed
+    # CASes when performing an update operation (see method
+    # +retry_update+). Upon a collision, if the table size is less than
+    # the capacity, it is doubled in size unless some other thread
+    # holds the lock. If a hashed slot is empty, and the lock is
+    # available, a new +Cell+ is created. Otherwise, if the slot
+    # exists, a CAS is tried. Retries proceed by "double hashing",
+    # using a secondary hash (XorShift) to try to find a
+    # free slot.
+    #
+    # The table size is capped because, when there are more threads
+    # than CPUs, supposing that each thread were bound to a CPU,
+    # there would exist a perfect hash function mapping threads to
+    # slots that eliminates collisions. When we reach capacity, we
+    # search for this mapping by randomly varying the hash codes of
+    # colliding threads. Because search is random, and collisions
+    # only become known via CAS failures, convergence can be slow,
+    # and because threads are typically not bound to CPUs forever,
+    # may not occur at all. However, despite these limitations,
+    # observed contention rates are typically low in these cases.
+    #
+    # It is possible for a +Cell+ to become unused when threads that
+    # once hashed to it terminate, as well as in the case where
+    # doubling the table causes no thread to hash to it under the
+    # expanded mask. We do not try to detect or remove such cells,
+    # under the assumption that for long-running instances, observed
+    # contention levels will recur, so the cells will eventually be
+    # needed again; and for short-lived ones, it does not matter.
+    class Striped64
+      # Padded variant of AtomicLong supporting only raw accesses plus CAS.
+      # The +value+ field is placed between pads, hoping that the JVM doesn't
+      # reorder them.
+      #
+      # Optimisation note: It would be possible to use a release-only
+      # form of CAS here, if it were provided.
+      class Cell < AtomicReference
+        # TODO: this only adds padding after the :value slot, need to find a way to add padding before the slot
+        attr_reader *(Array.new(12).map {|i| :"padding_#{i}"})
+
+        alias_method :cas, :compare_and_set
+
+        def cas_computed
+          cas(current_value = value, yield(current_value))
+        end
+      end
+
+      extend Volatile
+      attr_volatile :cells, # Table of cells. When non-null, size is a power of 2.
+                    :base,  # Base value, used mainly when there is no contention, but also as a fallback during table initialization races. Updated via CAS.
+                    :busy   # Spinlock (locked via CAS) used when resizing and/or creating Cells.
+
+      alias_method :busy?, :busy
+
+      def initialize
+        super()
+        self.busy = false
+        self.base = 0
+      end
+
+      # Handles cases of updates involving initialization, resizing,
+      # creating new Cells, and/or contention. See above for
+      # explanation. This method suffers the usual non-modularity
+      # problems of optimistic retry code, relying on rechecked sets of
+      # reads.
+      #
+      # Arguments:
+      # [+x+]
+      #   the value
+      # [+hash_code+]
+      #   hash code used
+      # [+was_uncontended+]
+      #   false if CAS failed before call
+      def retry_update(x, hash_code, was_uncontended) # :yields: current_value
+        hash = hash_code
+        collided = false # True if last slot nonempty
+        while true
+          if current_cells = cells
+            if !(cell = current_cells.volatile_get_by_hash(hash))
+              if busy?
+                collided = false
+              else # Try to attach new Cell
+                if try_to_install_new_cell(Cell.new(x), hash) # Optimistically create and try to insert new cell
+                  break
+                else
+                  redo # Slot is now non-empty
+                end
+              end
+            elsif !was_uncontended # CAS already known to fail
+              was_uncontended = true # Continue after rehash
+            elsif cell.cas_computed {|current_value| yield current_value}
+              break
+            elsif current_cells.size >= CPU_COUNT || cells != current_cells # At max size or stale
+              collided = false
+            elsif collided && expand_table_unless_stale(current_cells)
+              collided = false
+              redo # Retry with expanded table
+            else
+              collided = true
+            end
+            hash = XorShiftRandom.xorshift(hash)
+
+          elsif try_initialize_cells(x, hash) || cas_base_computed {|current_base| yield current_base}
+            break
+          end
+        end
+        self.hash_code = hash
+      end
+
+      private
+      # Static per-thread hash code key. Shared across all instances to
+      # reduce Thread locals pollution and because adjustments due to
+      # collisions in one table are likely to be appropriate for
+      # others.
+      THREAD_LOCAL_KEY = "#{name}.hash_code".to_sym
+
+      # A thread-local hash code accessor. The code is initially
+      # random, but may be set to a different value upon collisions.
+      def hash_code
+        Thread.current[THREAD_LOCAL_KEY] ||= XorShiftRandom.get
+      end
+
+      def hash_code=(hash)
+        Thread.current[THREAD_LOCAL_KEY] = hash
+      end
+
+      # Sets base and all +cells+ to the given value.
+      def internal_reset(initial_value)
+        current_cells = cells
+        self.base = initial_value
+        if current_cells
+          current_cells.each do |cell|
+            cell.value = initial_value if cell
+          end
+        end
+      end
+
+      def cas_base_computed
+        cas_base(current_base = base, yield(current_base))
+      end
+
+      def free?
+        !busy?
+      end
+
+      def try_initialize_cells(x, hash)
+        if free? && !cells
+          try_in_busy do
+            unless cells # Recheck under lock
+              new_cells = PowerOfTwoTuple.new(2)
+              new_cells.volatile_set_by_hash(hash, Cell.new(x))
+              self.cells = new_cells
+            end
+          end
+        end
+      end
+
+      def expand_table_unless_stale(current_cells)
+        try_in_busy do
+          if current_cells == cells # Recheck under lock
+            new_cells = current_cells.next_in_size_table
+            current_cells.each_with_index {|x, i| new_cells.volatile_set(i, x)}
+            self.cells = new_cells
+          end
+        end
+      end
+
+      def try_to_install_new_cell(new_cell, hash)
+        try_in_busy do
+          # Recheck under lock
+          if (current_cells = cells) && !current_cells.volatile_get(i = current_cells.hash_to_index(hash))
+            current_cells.volatile_set(i, new_cell)
+          end
+        end
+      end
+
+      def try_in_busy
+        if cas_busy(false, true)
+          begin
+            yield
+          ensure
+            self.busy = false
+          end
+        end
+      end
+    end
+  end
+end
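The only lock in the class is the +busy+ flag acquired in +try_in_busy+. Stripped of the surrounding machinery, the pattern is a CAS-acquired, always-released spinlock; a standalone sketch against the same reference contract shown earlier:

    busy = ThreadSafe::Util::AtomicReference.new(false)

    def try_in_busy(busy)
      if busy.compare_and_set(false, true) # acquire: exactly one thread wins the CAS
        begin
          yield # critical section: initialize, resize, or install a Cell
        ensure
          busy.set(false) # always release, even if the block raises
        end
      end
      # no blocking on failure: the caller simply retries another slot
      # (or the base), as retry_update does above
    end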
data/lib/thread_safe/util/volatile.rb
@@ -0,0 +1,62 @@
+module ThreadSafe
+  module Util
+    module Volatile
+      # Provides +volatile+ (in the JVM's sense) attribute accessors implemented atop +AtomicReference+s.
+      # Usage:
+      #   class Foo
+      #     extend ThreadSafe::Util::Volatile
+      #     attr_volatile :foo, :bar
+      #
+      #     def initialize(bar)
+      #       super() # must super() into parent initializers before using the volatile attribute accessors
+      #       self.bar = bar
+      #     end
+      #
+      #     def hello
+      #       my_foo = foo # volatile read
+      #       self.foo = 1 # volatile write
+      #       cas_foo(1, 2) # => true | a strong CAS
+      #     end
+      #   end
+      def attr_volatile(*attr_names)
+        return if attr_names.empty?
+        include(Module.new do
+          atomic_ref_setup = attr_names.map {|attr_name| "@__#{attr_name} = ThreadSafe::Util::AtomicReference.new"}
+          initialize_copy_setup = attr_names.zip(atomic_ref_setup).map do |attr_name, ref_setup|
+            "#{ref_setup}(other.instance_variable_get(:@__#{attr_name}).get)"
+          end
+          class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1
+            def initialize(*)
+              super
+              #{atomic_ref_setup.join('; ')}
+            end
+
+            def initialize_copy(other)
+              super
+              #{initialize_copy_setup.join('; ')}
+            end
+          RUBY_EVAL
+
+          attr_names.each do |attr_name|
+            class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1
+              def #{attr_name}
+                @__#{attr_name}.get
+              end
+
+              def #{attr_name}=(value)
+                @__#{attr_name}.set(value)
+              end
+
+              def compare_and_set_#{attr_name}(old_value, new_value)
+                @__#{attr_name}.compare_and_set(old_value, new_value)
+              end
+            RUBY_EVAL
+
+            alias_method :"cas_#{attr_name}", :"compare_and_set_#{attr_name}"
+            alias_method :"lazy_set_#{attr_name}", :"#{attr_name}="
+          end
+        end)
+      end
+    end
+  end
+end
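For a single attribute the generated surface behaves as in this sketch (the +Node+ class is a hypothetical editorial example):

    class Node
      extend ThreadSafe::Util::Volatile
      attr_volatile :next_node
    end

    node = Node.new                 # the generated initialize(*) creates @__next_node
    node.next_node                  # volatile read => nil
    node.cas_next_node(nil, :tail)  # alias of compare_and_set_next_node => true
    node.cas_next_node(nil, :other) # => false; the value is now :tail
    node.lazy_set_next_node(:head)  # currently aliased to the plain volatile writer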