concurrent-ruby 0.9.2 → 1.0.0.pre1
This diff shows the changes between publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/CHANGELOG.md +15 -1
- data/README.md +67 -68
- data/lib/concurrent.rb +14 -1
- data/lib/concurrent/array.rb +38 -0
- data/lib/concurrent/async.rb +0 -17
- data/lib/concurrent/atomic/abstract_thread_local_var.rb +40 -0
- data/lib/concurrent/atomic/atomic_boolean.rb +81 -118
- data/lib/concurrent/atomic/atomic_fixnum.rb +98 -162
- data/lib/concurrent/atomic/atomic_reference.rb +0 -7
- data/lib/concurrent/atomic/count_down_latch.rb +62 -103
- data/lib/concurrent/atomic/cyclic_barrier.rb +2 -0
- data/lib/concurrent/atomic/java_count_down_latch.rb +39 -0
- data/lib/concurrent/atomic/java_thread_local_var.rb +50 -0
- data/lib/concurrent/atomic/mutex_atomic_boolean.rb +60 -0
- data/lib/concurrent/atomic/mutex_atomic_fixnum.rb +91 -0
- data/lib/concurrent/atomic/mutex_count_down_latch.rb +43 -0
- data/lib/concurrent/atomic/mutex_semaphore.rb +115 -0
- data/lib/concurrent/atomic/ruby_thread_local_var.rb +172 -0
- data/lib/concurrent/atomic/semaphore.rb +84 -178
- data/lib/concurrent/atomic/thread_local_var.rb +63 -294
- data/lib/concurrent/atomic_reference/mutex_atomic.rb +14 -8
- data/lib/concurrent/atomics.rb +0 -33
- data/lib/concurrent/collection/java_non_concurrent_priority_queue.rb +84 -0
- data/lib/concurrent/collection/map/atomic_reference_map_backend.rb +921 -0
- data/lib/concurrent/collection/map/mri_map_backend.rb +66 -0
- data/lib/concurrent/collection/map/non_concurrent_map_backend.rb +142 -0
- data/lib/concurrent/collection/map/synchronized_map_backend.rb +86 -0
- data/lib/concurrent/collection/non_concurrent_priority_queue.rb +143 -0
- data/lib/concurrent/collection/ruby_non_concurrent_priority_queue.rb +150 -0
- data/lib/concurrent/concern/logging.rb +1 -1
- data/lib/concurrent/concern/obligation.rb +0 -12
- data/lib/concurrent/configuration.rb +18 -148
- data/lib/concurrent/delay.rb +5 -4
- data/lib/concurrent/exchanger.rb +327 -41
- data/lib/concurrent/executor/abstract_executor_service.rb +134 -0
- data/lib/concurrent/executor/executor.rb +4 -29
- data/lib/concurrent/executor/executor_service.rb +23 -359
- data/lib/concurrent/executor/immediate_executor.rb +3 -2
- data/lib/concurrent/executor/java_executor_service.rb +100 -0
- data/lib/concurrent/executor/java_single_thread_executor.rb +3 -2
- data/lib/concurrent/executor/java_thread_pool_executor.rb +3 -4
- data/lib/concurrent/executor/ruby_executor_service.rb +72 -0
- data/lib/concurrent/executor/ruby_single_thread_executor.rb +7 -5
- data/lib/concurrent/executor/ruby_thread_pool_executor.rb +3 -11
- data/lib/concurrent/executor/safe_task_executor.rb +1 -1
- data/lib/concurrent/executor/serial_executor_service.rb +34 -0
- data/lib/concurrent/executor/serialized_execution.rb +8 -31
- data/lib/concurrent/executor/serialized_execution_delegator.rb +28 -0
- data/lib/concurrent/executor/simple_executor_service.rb +1 -10
- data/lib/concurrent/executor/timer_set.rb +4 -8
- data/lib/concurrent/executors.rb +13 -2
- data/lib/concurrent/future.rb +2 -2
- data/lib/concurrent/hash.rb +35 -0
- data/lib/concurrent/ivar.rb +9 -14
- data/lib/concurrent/map.rb +178 -0
- data/lib/concurrent/promise.rb +2 -2
- data/lib/concurrent/scheduled_task.rb +9 -69
- data/lib/concurrent/thread_safe/synchronized_delegator.rb +50 -0
- data/lib/concurrent/thread_safe/util.rb +23 -0
- data/lib/concurrent/thread_safe/util/adder.rb +71 -0
- data/lib/concurrent/thread_safe/util/array_hash_rbx.rb +28 -0
- data/lib/concurrent/thread_safe/util/cheap_lockable.rb +115 -0
- data/lib/concurrent/thread_safe/util/power_of_two_tuple.rb +37 -0
- data/lib/concurrent/thread_safe/util/striped64.rb +236 -0
- data/lib/concurrent/thread_safe/util/volatile.rb +73 -0
- data/lib/concurrent/thread_safe/util/xor_shift_random.rb +48 -0
- data/lib/concurrent/timer_task.rb +3 -3
- data/lib/concurrent/tuple.rb +86 -0
- data/lib/concurrent/version.rb +2 -2
- metadata +37 -10
- data/lib/concurrent/atomic/condition.rb +0 -78
- data/lib/concurrent/collection/priority_queue.rb +0 -360
- data/lib/concurrent/utilities.rb +0 -5
- data/lib/concurrent/utility/timeout.rb +0 -39
- data/lib/concurrent/utility/timer.rb +0 -26
- data/lib/concurrent_ruby.rb +0 -2
data/lib/concurrent/atomic/java_count_down_latch.rb
@@ -0,0 +1,39 @@
+if Concurrent.on_jruby?
+
+  module Concurrent
+
+    # @!macro count_down_latch
+    # @!visibility private
+    # @!macro internal_implementation_note
+    class JavaCountDownLatch
+
+      # @!macro count_down_latch_method_initialize
+      def initialize(count = 1)
+        unless count.is_a?(Fixnum) && count >= 0
+          raise ArgumentError.new('count must be an integer greater than or equal to zero')
+        end
+        @latch = java.util.concurrent.CountDownLatch.new(count)
+      end
+
+      # @!macro count_down_latch_method_wait
+      def wait(timeout = nil)
+        if timeout.nil?
+          @latch.await
+          true
+        else
+          @latch.await(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS)
+        end
+      end
+
+      # @!macro count_down_latch_method_count_down
+      def count_down
+        @latch.countDown
+      end
+
+      # @!macro count_down_latch_method_count
+      def count
+        @latch.getCount
+      end
+    end
+  end
+end
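For context, the JRuby implementation above is a thin wrapper over java.util.concurrent.CountDownLatch. A rough usage sketch of the public Concurrent::CountDownLatch facade, which should pick this class on JRuby and the mutex-based one elsewhere; the worker bodies are hypothetical:

    require 'concurrent'

    latch = Concurrent::CountDownLatch.new(3)

    3.times do |i|
      Thread.new do
        # ... hypothetical work for worker i ...
        latch.count_down
      end
    end

    # Block until all three workers have counted down, or give up after 5 seconds.
    finished = latch.wait(5)
    puts finished ? 'all workers finished' : 'timed out'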
data/lib/concurrent/atomic/java_thread_local_var.rb
@@ -0,0 +1,50 @@
+require 'concurrent/atomic/abstract_thread_local_var'
+
+if Concurrent.on_jruby?
+
+  module Concurrent
+
+    # @!visibility private
+    # @!macro internal_implementation_note
+    class JavaThreadLocalVar < AbstractThreadLocalVar
+
+      # @!macro thread_local_var_method_get
+      def value
+        value = @var.get
+
+        if value.nil?
+          @default
+        elsif value == NIL_SENTINEL
+          nil
+        else
+          value
+        end
+      end
+
+      # @!macro thread_local_var_method_set
+      def value=(value)
+        @var.set(value)
+      end
+
+      # @!macro thread_local_var_method_bind
+      def bind(value, &block)
+        if block_given?
+          old_value = @var.get
+          begin
+            @var.set(value)
+            yield
+          ensure
+            @var.set(old_value)
+          end
+        end
+      end
+
+      protected
+
+      # @!visibility private
+      def allocate_storage
+        @var = java.lang.ThreadLocal.new
+      end
+    end
+  end
+end
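As a rough illustration of the thread-local behaviour implemented above (via the public Concurrent::ThreadLocalVar, which delegates to this class on JRuby), each thread sees its own value and falls back to the default where nothing was set; the numbers are arbitrary:

    require 'concurrent'

    var = Concurrent::ThreadLocalVar.new(0)

    t = Thread.new do
      var.value = 42
      var.bind(99) { var.value }  # => 99 inside the bind block
      var.value                   # => 42 again once the block returns
    end
    t.join

    var.value                     # => 0 in the main thread (the default)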
data/lib/concurrent/atomic/mutex_atomic_boolean.rb
@@ -0,0 +1,60 @@
+require 'concurrent/synchronization'
+
+module Concurrent
+
+  # @!macro atomic_boolean
+  # @!visibility private
+  # @!macro internal_implementation_note
+  class MutexAtomicBoolean < Synchronization::Object
+
+    # @!macro atomic_boolean_method_initialize
+    def initialize(initial = false)
+      super()
+      synchronize { ns_initialize(initial) }
+    end
+
+    # @!macro atomic_boolean_method_value_get
+    def value
+      synchronize { @value }
+    end
+
+    # @!macro atomic_boolean_method_value_set
+    def value=(value)
+      synchronize { @value = !!value }
+    end
+
+    # @!macro atomic_boolean_method_true_question
+    def true?
+      synchronize { @value }
+    end
+
+    # @!macro atomic_boolean_method_false_question
+    def false?
+      synchronize { !@value }
+    end
+
+    # @!macro atomic_boolean_method_make_true
+    def make_true
+      synchronize { ns_make_value(true) }
+    end
+
+    # @!macro atomic_boolean_method_make_false
+    def make_false
+      synchronize { ns_make_value(false) }
+    end
+
+    protected
+
+    # @!visibility private
+    def ns_initialize(initial)
+      @value = !!initial
+    end
+
+    # @!visibility private
+    def ns_make_value(value)
+      old = @value
+      @value = value
+      old != @value
+    end
+  end
+end
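A small usage sketch of the public Concurrent::AtomicBoolean, which falls back to this mutex-based implementation on MRI; the return values follow from ns_make_value above (true only when the stored value actually changed):

    require 'concurrent'

    flag = Concurrent::AtomicBoolean.new(false)

    flag.make_true     # => true  (the value changed)
    flag.make_true     # => false (it was already true)
    flag.true?         # => true
    flag.value = nil   # values are coerced with !!, so this stores false
    flag.false?        # => true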
data/lib/concurrent/atomic/mutex_atomic_fixnum.rb
@@ -0,0 +1,91 @@
+require 'concurrent/synchronization'
+
+module Concurrent
+
+  # @!macro atomic_fixnum
+  # @!visibility private
+  # @!macro internal_implementation_note
+  class MutexAtomicFixnum < Synchronization::Object
+
+    # http://stackoverflow.com/questions/535721/ruby-max-integer
+    MIN_VALUE = -(2**(0.size * 8 - 2))
+    MAX_VALUE = (2**(0.size * 8 - 2) - 1)
+
+    # @!macro atomic_fixnum_method_initialize
+    def initialize(initial = 0)
+      super()
+      synchronize { ns_initialize(initial) }
+    end
+
+    # @!macro atomic_fixnum_method_value_get
+    def value
+      synchronize { @value }
+    end
+
+    # @!macro atomic_fixnum_method_value_set
+    def value=(value)
+      synchronize { ns_set(value) }
+    end
+
+    # @!macro atomic_fixnum_method_increment
+    def increment(delta = 1)
+      synchronize { ns_set(@value + delta.to_i) }
+    end
+
+    alias_method :up, :increment
+
+    # @!macro atomic_fixnum_method_decrement
+    def decrement(delta = 1)
+      synchronize { ns_set(@value - delta.to_i) }
+    end
+
+    alias_method :down, :decrement
+
+    # @!macro atomic_fixnum_method_compare_and_set
+    def compare_and_set(expect, update)
+      synchronize do
+        if @value == expect.to_i
+          @value = update.to_i
+          true
+        else
+          false
+        end
+      end
+    end
+
+    # @!macro atomic_fixnum_method_update
+    def update
+      synchronize do
+        @value = yield @value
+      end
+    end
+
+    protected
+
+    # @!visibility private
+    def ns_initialize(initial)
+      ns_set(initial)
+    end
+
+    private
+
+    # @!visibility private
+    def ns_set(value)
+      range_check!(value)
+      @value = value
+    end
+
+    # @!visibility private
+    def range_check!(value)
+      if !value.is_a?(Fixnum)
+        raise ArgumentError.new('value must be a Fixnum')
+      elsif value > MAX_VALUE
+        raise RangeError.new("#{value} is greater than the maximum value of #{MAX_VALUE}")
+      elsif value < MIN_VALUE
+        raise RangeError.new("#{value} is less than the minimum value of #{MIN_VALUE}")
+      else
+        value
+      end
+    end
+  end
+end
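Likewise, a brief sketch of the public Concurrent::AtomicFixnum, for which this class is the mutex-based fallback; increment and decrement return the new value, and compare_and_set reports whether the swap happened:

    require 'concurrent'

    counter = Concurrent::AtomicFixnum.new(10)

    counter.increment               # => 11
    counter.increment(4)            # => 15
    counter.decrement(5)            # => 10
    counter.compare_and_set(10, 0)  # => true, value is now 0
    counter.compare_and_set(10, 7)  # => false, value is unchanged
    counter.update { |v| v * 3 }    # => 0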
data/lib/concurrent/atomic/mutex_count_down_latch.rb
@@ -0,0 +1,43 @@
+require 'concurrent/synchronization'
+
+module Concurrent
+
+  # @!macro count_down_latch
+  # @!visibility private
+  # @!macro internal_implementation_note
+  class MutexCountDownLatch < Synchronization::Object
+
+    # @!macro count_down_latch_method_initialize
+    def initialize(count = 1)
+      unless count.is_a?(Fixnum) && count >= 0
+        raise ArgumentError.new('count must be an integer greater than or equal to zero')
+      end
+      super()
+      synchronize { ns_initialize count }
+    end
+
+    # @!macro count_down_latch_method_wait
+    def wait(timeout = nil)
+      synchronize { ns_wait_until(timeout) { @count == 0 } }
+    end
+
+    # @!macro count_down_latch_method_count_down
+    def count_down
+      synchronize do
+        @count -= 1 if @count > 0
+        ns_broadcast if @count == 0
+      end
+    end
+
+    # @!macro count_down_latch_method_count
+    def count
+      synchronize { @count }
+    end
+
+    protected
+
+    def ns_initialize(count)
+      @count = count
+    end
+  end
+end
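The pure-Ruby latch delegates blocking to ns_wait_until from Synchronization::Object, so wait should return true once the count reaches zero and false if the timeout expires first. A small sketch of those timeout semantics (the timing values are arbitrary):

    require 'concurrent'

    latch = Concurrent::CountDownLatch.new(1)

    latch.wait(0.1)    # => false, nobody counted down within 0.1 s

    Thread.new { sleep(0.05); latch.count_down }
    latch.wait(1)      # => true, the count hit zero before the timeout
    latch.count        # => 0; further count_down calls leave it at zero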
data/lib/concurrent/atomic/mutex_semaphore.rb
@@ -0,0 +1,115 @@
+require 'concurrent/synchronization'
+
+module Concurrent
+
+  # @!macro semaphore
+  # @!visibility private
+  # @!macro internal_implementation_note
+  class MutexSemaphore < Synchronization::Object
+
+    # @!macro semaphore_method_initialize
+    def initialize(count)
+      unless count.is_a?(Fixnum) && count >= 0
+        fail ArgumentError, 'count must be a non-negative integer'
+      end
+      super()
+      synchronize { ns_initialize count }
+    end
+
+    # @!macro semaphore_method_acquire
+    def acquire(permits = 1)
+      unless permits.is_a?(Fixnum) && permits > 0
+        fail ArgumentError, 'permits must be an integer greater than zero'
+      end
+      synchronize do
+        try_acquire_timed(permits, nil)
+        nil
+      end
+    end
+
+    # @!macro semaphore_method_available_permits
+    def available_permits
+      synchronize { @free }
+    end
+
+    # @!macro semaphore_method_drain_permits
+    #
+    # Acquires and returns all permits that are immediately available.
+    #
+    # @return [Integer]
+    def drain_permits
+      synchronize do
+        @free.tap { |_| @free = 0 }
+      end
+    end
+
+    # @!macro semaphore_method_try_acquire
+    def try_acquire(permits = 1, timeout = nil)
+      unless permits.is_a?(Fixnum) && permits > 0
+        fail ArgumentError, 'permits must be an integer greater than zero'
+      end
+      synchronize do
+        if timeout.nil?
+          try_acquire_now(permits)
+        else
+          try_acquire_timed(permits, timeout)
+        end
+      end
+    end
+
+    # @!macro semaphore_method_release
+    def release(permits = 1)
+      unless permits.is_a?(Fixnum) && permits > 0
+        fail ArgumentError, 'permits must be an integer greater than zero'
+      end
+      synchronize do
+        @free += permits
+        permits.times { ns_signal }
+      end
+      nil
+    end
+
+    # Shrinks the number of available permits by the indicated reduction.
+    #
+    # @param [Fixnum] reduction Number of permits to remove.
+    #
+    # @raise [ArgumentError] if `reduction` is not an integer or is negative
+    #
+    # @raise [ArgumentError] if `@free` - `@reduction` is less than zero
+    #
+    # @return [nil]
+    #
+    # @!visibility private
+    def reduce_permits(reduction)
+      unless reduction.is_a?(Fixnum) && reduction >= 0
+        fail ArgumentError, 'reduction must be a non-negative integer'
+      end
+      synchronize { @free -= reduction }
+      nil
+    end
+
+    protected
+
+    # @!visibility private
+    def ns_initialize(count)
+      @free = count
+    end
+
+    private
+
+    # @!visibility private
+    def try_acquire_now(permits)
+      if @free >= permits
+        @free -= permits
+        true
+      else
+        false
+      end
+    end
+
+    # @!visibility private
+    def try_acquire_timed(permits, timeout)
+      ns_wait_until(timeout) { try_acquire_now(permits) }
+    end
+  end
+end
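A short usage sketch of the public Concurrent::Semaphore, backed by this class on MRI; note that acquire returns nil, try_acquire returns a boolean, and drain_permits takes whatever is immediately available:

    require 'concurrent'

    # Two permits: at most two acquirers hold the resource at once.
    semaphore = Concurrent::Semaphore.new(2)

    semaphore.acquire              # blocks until a permit is free, returns nil
    semaphore.try_acquire          # => true, the second permit is taken immediately
    semaphore.try_acquire(1, 0.1)  # => false after waiting up to 0.1 s
    semaphore.available_permits    # => 0
    semaphore.release(2)           # give both permits back
    semaphore.drain_permits        # => 2, leaving available_permits at 0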
data/lib/concurrent/atomic/ruby_thread_local_var.rb
@@ -0,0 +1,172 @@
+require 'thread'
+require 'concurrent/atomic/abstract_thread_local_var'
+
+module Concurrent
+
+  # @!visibility private
+  # @!macro internal_implementation_note
+  class RubyThreadLocalVar < AbstractThreadLocalVar
+
+    # Each thread has a (lazily initialized) array of thread-local variable values
+    # Each time a new thread-local var is created, we allocate an "index" for it
+    # For example, if the allocated index is 1, that means slot #1 in EVERY
+    # thread's thread-local array will be used for the value of that TLV
+    #
+    # The good thing about using a per-THREAD structure to hold values, rather
+    # than a per-TLV structure, is that no synchronization is needed when
+    # reading and writing those values (since the structure is only ever
+    # accessed by a single thread)
+    #
+    # Of course, when a TLV is GC'd, 1) we need to recover its index for use
+    # by other new TLVs (otherwise the thread-local arrays could get bigger
+    # and bigger with time), and 2) we need to null out all the references
+    # held in the now-unused slots (both to avoid blocking GC of those objects,
+    # and also to prevent "stale" values from being passed on to a new TLV
+    # when the index is reused)
+    # Because we need to null out freed slots, we need to keep references to
+    # ALL the thread-local arrays -- ARRAYS is for that
+    # But when a Thread is GC'd, we need to drop the reference to its thread-local
+    # array, so we don't leak memory
+
+    # @!visibility private
+    FREE = []
+    LOCK = Mutex.new
+    ARRAYS = {} # used as a hash set
+    @@next = 0
+    private_constant :FREE, :LOCK, :ARRAYS
+
+    # @!macro [attach] thread_local_var_method_initialize
+    #
+    # Creates a thread local variable.
+    #
+    # @param [Object] default the default value when otherwise unset
+    def initialize(default = nil)
+      @default = default
+      allocate_storage
+    end
+
+    # @!macro thread_local_var_method_get
+    def value
+      if array = get_threadlocal_array
+        value = array[@index]
+        if value.nil?
+          @default
+        elsif value.equal?(NIL_SENTINEL)
+          nil
+        else
+          value
+        end
+      else
+        @default
+      end
+    end
+
+    # @!macro thread_local_var_method_set
+    def value=(value)
+      me = Thread.current
+      # We could keep the thread-local arrays in a hash, keyed by Thread
+      # But why? That would require locking
+      # Using Ruby's built-in thread-local storage is faster
+      unless array = get_threadlocal_array(me)
+        array = set_threadlocal_array([], me)
+        LOCK.synchronize { ARRAYS[array.object_id] = array }
+        ObjectSpace.define_finalizer(me, self.class.thread_finalizer(array))
+      end
+      array[@index] = (value.nil? ? NIL_SENTINEL : value)
+      value
+    end
+
+    # @!macro thread_local_var_method_bind
+    def bind(value, &block)
+      if block_given?
+        old_value = self.value
+        begin
+          self.value = value
+          yield
+        ensure
+          self.value = old_value
+        end
+      end
+    end
+
+    protected
+
+    # @!visibility private
+    def allocate_storage
+      @index = LOCK.synchronize do
+        FREE.pop || begin
+          result = @@next
+          @@next += 1
+          result
+        end
+      end
+      ObjectSpace.define_finalizer(self, self.class.threadlocal_finalizer(@index))
+    end
+
+    # @!visibility private
+    def self.threadlocal_finalizer(index)
+      proc do
+        LOCK.synchronize do
+          FREE.push(index)
+          # The cost of GC'ing a TLV is linear in the number of threads using TLVs
+          # But that is natural! More threads means more storage is used per TLV
+          # So naturally more CPU time is required to free more storage
+          ARRAYS.each_value do |array|
+            array[index] = nil
+          end
+        end
+      end
+    end
+
+    # @!visibility private
+    def self.thread_finalizer(array)
+      proc do
+        LOCK.synchronize do
+          # The thread which used this thread-local array is now gone
+          # So don't hold onto a reference to the array (thus blocking GC)
+          ARRAYS.delete(array.object_id)
+        end
+      end
+    end
+
+    private
+
+    if Thread.instance_methods.include?(:thread_variable_get)
+
+      def get_threadlocal_array(thread = Thread.current)
+        thread.thread_variable_get(:__threadlocal_array__)
+      end
+
+      def set_threadlocal_array(array, thread = Thread.current)
+        thread.thread_variable_set(:__threadlocal_array__, array)
+      end
+
+    else
+
+      def get_threadlocal_array(thread = Thread.current)
+        thread[:__threadlocal_array__]
+      end
+
+      def set_threadlocal_array(array, thread = Thread.current)
+        thread[:__threadlocal_array__] = array
+      end
+    end
+
+    # This exists only for use in testing
+    # @!visibility private
+    def value_for(thread)
+      if array = get_threadlocal_array(thread)
+        value = array[@index]
+        if value.nil?
+          @default
+        elsif value.equal?(NIL_SENTINEL)
+          nil
+        else
+          value
+        end
+      else
+        @default
+      end
+    end
+  end
+end
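To make the comment block in RubyThreadLocalVar concrete, here is a toy, stripped-down sketch of the same slot-index idea: each variable gets a slot number, each thread keeps its own array indexed by those slots, and only slot allocation needs a lock. The names (ToyThreadLocal, :__toy_tls__) are illustrative only, and the sketch omits the NIL_SENTINEL and finalizer/free-list machinery the real class needs:

    # Toy illustration only -- not part of concurrent-ruby.
    class ToyThreadLocal
      @@lock  = Mutex.new
      @@slots = 0

      def initialize(default = nil)
        @default = default
        # Allocating the slot index is the only operation that needs the lock.
        @index = @@lock.synchronize { (@@slots += 1) - 1 }
      end

      def value
        array = Thread.current[:__toy_tls__]
        (array && !array[@index].nil?) ? array[@index] : @default
      end

      def value=(val)
        # The array belongs to the current thread, so no locking is needed here.
        array = (Thread.current[:__toy_tls__] ||= [])
        array[@index] = val
      end
    end

    var = ToyThreadLocal.new(:none)
    var.value = 1
    Thread.new { var.value }.value  # => :none, the other thread never set slot 0
    var.value                       # => 1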