concurrent-ruby 1.0.3.pre3-java → 1.0.4-java
- checksums.yaml +4 -4
- data/CHANGELOG.md +40 -12
- data/README.md +29 -39
- data/lib/concurrent.rb +3 -3
- data/lib/concurrent/async.rb +2 -2
- data/lib/concurrent/atom.rb +4 -3
- data/lib/concurrent/atomic/abstract_thread_local_var.rb +29 -3
- data/lib/concurrent/atomic/atomic_fixnum.rb +4 -0
- data/lib/concurrent/atomic/atomic_reference.rb +7 -0
- data/lib/concurrent/atomic/count_down_latch.rb +23 -0
- data/lib/concurrent/atomic/cyclic_barrier.rb +23 -3
- data/lib/concurrent/atomic/java_thread_local_var.rb +1 -14
- data/lib/concurrent/atomic/mutex_atomic_fixnum.rb +2 -18
- data/lib/concurrent/atomic/mutex_count_down_latch.rb +3 -3
- data/lib/concurrent/atomic/mutex_semaphore.rb +15 -15
- data/lib/concurrent/atomic/ruby_thread_local_var.rb +31 -42
- data/lib/concurrent/atomic/thread_local_var.rb +7 -5
- data/lib/concurrent/atomic_reference/jruby+truffle.rb +2 -1
- data/lib/concurrent/collection/map/non_concurrent_map_backend.rb +1 -0
- data/lib/concurrent/concern/obligation.rb +1 -0
- data/lib/concurrent/configuration.rb +56 -21
- data/lib/concurrent/errors.rb +24 -1
- data/lib/concurrent/executor/timer_set.rb +11 -0
- data/lib/concurrent/hash.rb +2 -1
- data/lib/concurrent/map.rb +5 -3
- data/lib/concurrent/promise.rb +10 -6
- data/lib/concurrent/synchronization/object.rb +2 -2
- data/lib/concurrent/synchronization/rbx_object.rb +1 -0
- data/lib/concurrent/synchronization/truffle_object.rb +1 -2
- data/lib/concurrent/thread_safe/util.rb +2 -0
- data/lib/concurrent/timer_task.rb +3 -3
- data/lib/concurrent/tvar.rb +1 -1
- data/lib/concurrent/utility/engine.rb +3 -3
- data/lib/concurrent/utility/native_integer.rb +53 -0
- data/lib/concurrent/utility/processor_counter.rb +15 -13
- data/lib/concurrent/version.rb +2 -2
- data/lib/concurrent_ruby_ext.jar +0 -0
- metadata +8 -6
data/lib/concurrent/atomic/java_thread_local_var.rb

@@ -13,7 +13,7 @@ if Concurrent.on_jruby?
         value = @var.get
 
         if value.nil?
-
+          default
         elsif value == NULL
           nil
         else
@@ -26,19 +26,6 @@ if Concurrent.on_jruby?
         @var.set(value)
       end
 
-      # @!macro thread_local_var_method_bind
-      def bind(value, &block)
-        if block_given?
-          old_value = @var.get
-          begin
-            @var.set(value)
-            yield
-          ensure
-            @var.set(old_value)
-          end
-        end
-      end
-
       protected
 
       # @!visibility private

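The #bind method deleted here (and from the Ruby implementation further below) is not dropped functionality: the file list above shows data/lib/concurrent/atomic/abstract_thread_local_var.rb gaining 29 lines, which is consistent with the method moving into the shared superclass. A minimal sketch of the behaviour it provides, assuming the public API is unchanged:

    require 'concurrent'

    v = Concurrent::ThreadLocalVar.new(:default)

    v.bind(:scoped) do
      v.value #=> :scoped, but only inside this block and only on this thread
    end

    v.value   #=> :default again; the previous value is restored by the ensure
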
data/lib/concurrent/atomic/mutex_atomic_fixnum.rb

@@ -1,4 +1,5 @@
 require 'concurrent/synchronization'
+require 'concurrent/utility/native_integer'
 
 module Concurrent
 
@@ -7,10 +8,6 @@ module Concurrent
   # @!macro internal_implementation_note
   class MutexAtomicFixnum < Synchronization::LockableObject
 
-    # http://stackoverflow.com/questions/535721/ruby-max-integer
-    MIN_VALUE = -(2**(0.size * 8 - 2))
-    MAX_VALUE = (2**(0.size * 8 - 2) - 1)
-
     # @!macro atomic_fixnum_method_initialize
     def initialize(initial = 0)
       super()
@@ -71,21 +68,8 @@ module Concurrent
 
     # @!visibility private
     def ns_set(value)
-
+      Utility::NativeInteger.ensure_integer_and_bounds value
       @value = value
     end
-
-    # @!visibility private
-    def range_check!(value)
-      if !value.is_a?(Fixnum)
-        raise ArgumentError.new('value value must be a Fixnum')
-      elsif value > MAX_VALUE
-        raise RangeError.new("#{value} is greater than the maximum value of #{MAX_VALUE}")
-      elsif value < MIN_VALUE
-        raise RangeError.new("#{value} is less than the maximum value of #{MIN_VALUE}")
-      else
-        value
-      end
-    end
   end
 end

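The per-class bounds constants and range_check! are replaced by helpers from the new data/lib/concurrent/utility/native_integer.rb (+53 lines above), which CountDownLatch and Semaphore below reuse. A rough sketch of the effect on callers, assuming the helpers reject the same inputs the deleted code did:

    require 'concurrent'

    counter = Concurrent::AtomicFixnum.new(0)
    counter.increment    #=> 1
    counter.value = 10   #=> 10

    begin
      counter.value = 'ten' # non-integer values are rejected by the shared helper
    rescue StandardError => e
      e.class               # exact error class is an assumption (ArgumentError in the old range_check!)
    end
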
data/lib/concurrent/atomic/mutex_count_down_latch.rb

@@ -9,9 +9,9 @@ module Concurrent
 
     # @!macro count_down_latch_method_initialize
     def initialize(count = 1)
-
-
-
+      Utility::NativeInteger.ensure_integer_and_bounds count
+      Utility::NativeInteger.ensure_positive count
+
       super()
       synchronize { ns_initialize count }
     end

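With the inline argument checks gone, the latch count is validated by the same NativeInteger helpers. A short usage sketch; the assumption is that an invalid count still raises on construction, as the old code did:

    require 'concurrent'

    latch = Concurrent::CountDownLatch.new(2)
    2.times { Thread.new { sleep 0.1; latch.count_down } }
    latch.wait(5) #=> true once both workers have counted down

    begin
      Concurrent::CountDownLatch.new(-1) # negative counts fail the ensure_positive check
    rescue StandardError => e
      e.class
    end
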
data/lib/concurrent/atomic/mutex_semaphore.rb

@@ -1,4 +1,5 @@
 require 'concurrent/synchronization'
+require 'concurrent/utility/native_integer'
 
 module Concurrent
 
@@ -9,18 +10,17 @@ module Concurrent
 
     # @!macro semaphore_method_initialize
     def initialize(count)
-
-
-      end
+      Utility::NativeInteger.ensure_integer_and_bounds count
+
       super()
       synchronize { ns_initialize count }
     end
 
     # @!macro semaphore_method_acquire
     def acquire(permits = 1)
-
-
-
+      Utility::NativeInteger.ensure_integer_and_bounds permits
+      Utility::NativeInteger.ensure_positive permits
+
       synchronize do
         try_acquire_timed(permits, nil)
         nil
@@ -45,9 +45,9 @@ module Concurrent
 
     # @!macro semaphore_method_try_acquire
     def try_acquire(permits = 1, timeout = nil)
-
-
-
+      Utility::NativeInteger.ensure_integer_and_bounds permits
+      Utility::NativeInteger.ensure_positive permits
+
       synchronize do
         if timeout.nil?
           try_acquire_now(permits)
@@ -59,9 +59,9 @@ module Concurrent
 
     # @!macro semaphore_method_release
     def release(permits = 1)
-
-
-
+      Utility::NativeInteger.ensure_integer_and_bounds permits
+      Utility::NativeInteger.ensure_positive permits
+
       synchronize do
         @free += permits
         permits.times { ns_signal }
@@ -81,9 +81,9 @@ module Concurrent
     #
     # @!visibility private
     def reduce_permits(reduction)
-
-
-
+      Utility::NativeInteger.ensure_integer_and_bounds reduction
+      Utility::NativeInteger.ensure_positive reduction
+
       synchronize { @free -= reduction }
       nil
     end

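Every Semaphore entry point now funnels its integer arguments through the same two helpers before touching the lock. Usage is unchanged; a small self-contained example:

    require 'concurrent'

    semaphore = Concurrent::Semaphore.new(2) # at most two holders at a time

    workers = 4.times.map do
      Thread.new do
        semaphore.acquire
        begin
          sleep 0.05 # simulated critical section
        ensure
          semaphore.release
        end
      end
    end
    workers.each(&:join)

    semaphore.try_acquire(1, 0.5) #=> permits and timeout are validated before any waiting
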
data/lib/concurrent/atomic/ruby_thread_local_var.rb

@@ -29,35 +29,25 @@ module Concurrent
     # array, so we don't leak memory
 
     # @!visibility private
-    FREE
-    LOCK
+    FREE = []
+    LOCK = Mutex.new
     ARRAYS = {} # used as a hash set
     @@next = 0
     private_constant :FREE, :LOCK, :ARRAYS
 
-    # @!macro [attach] thread_local_var_method_initialize
-    #
-    # Creates a thread local variable.
-    #
-    # @param [Object] default the default value when otherwise unset
-    def initialize(default = nil)
-      @default = default
-      allocate_storage
-    end
-
     # @!macro thread_local_var_method_get
     def value
       if array = get_threadlocal_array
         value = array[@index]
         if value.nil?
-
+          default
         elsif value.equal?(NULL)
           nil
         else
           value
         end
       else
-
+        default
       end
     end
 
@@ -76,28 +66,15 @@ module Concurrent
       value
     end
 
-    # @!macro thread_local_var_method_bind
-    def bind(value, &block)
-      if block_given?
-        old_value = self.value
-        begin
-          self.value = value
-          yield
-        ensure
-          self.value = old_value
-        end
-      end
-    end
-
     protected
 
     # @!visibility private
     def allocate_storage
      @index = LOCK.synchronize do
        FREE.pop || begin
-
-
-
+          result = @@next
+          @@next += 1
+          result
        end
      end
      ObjectSpace.define_finalizer(self, self.class.threadlocal_finalizer(@index))
@@ -106,13 +83,15 @@ module Concurrent
    # @!visibility private
    def self.threadlocal_finalizer(index)
      proc do
-
-
-
-
-
-
-
+        Thread.new do # avoid error: can't be called from trap context
+          LOCK.synchronize do
+            FREE.push(index)
+            # The cost of GC'ing a TLV is linear in the number of threads using TLVs
+            # But that is natural! More threads means more storage is used per TLV
+            # So naturally more CPU time is required to free more storage
+            ARRAYS.each_value do |array|
+              array[index] = nil
+            end
          end
        end
      end
@@ -121,10 +100,12 @@ module Concurrent
    # @!visibility private
    def self.thread_finalizer(array)
      proc do
-
-
-
-
+        Thread.new do # avoid error: can't be called from trap context
+          LOCK.synchronize do
+            # The thread which used this thread-local array is now gone
+            # So don't hold onto a reference to the array (thus blocking GC)
+            ARRAYS.delete(array.object_id)
+          end
        end
      end
    end
@@ -158,12 +139,20 @@ module Concurrent
      if array = get_threadlocal_array(thread)
        value = array[@index]
        if value.nil?
-
+          default_for(thread)
        elsif value.equal?(NULL)
          nil
        else
          value
        end
+      else
+        default_for(thread)
+      end
+    end
+
+    def default_for(thread)
+      if @default_block
+        raise "Cannot use default_for with default block"
      else
        @default
      end

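Both finalizers above now hand their cleanup to a freshly spawned thread because a finalizer may run in a context where blocking on a Mutex raises "can't be called from trap context" on MRI. The pattern in isolation, as a generic sketch rather than the library code:

    LOCK     = Mutex.new
    REGISTRY = {}

    def track(obj, key)
      REGISTRY[key] = true
      # The finalizer must not take LOCK directly, so the work is pushed
      # onto a short-lived thread that is allowed to synchronize.
      ObjectSpace.define_finalizer(obj, proc do
        Thread.new { LOCK.synchronize { REGISTRY.delete(key) } }
      end)
    end

    track(Object.new, :example)
    GC.start
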
data/lib/concurrent/atomic/thread_local_var.rb

@@ -11,6 +11,8 @@ module Concurrent
   # Creates a thread local variable.
   #
   # @param [Object] default the default value when otherwise unset
+  # @param [Proc] default_block Optional block that gets called to obtain the
+  #   default value for each thread
 
   # @!macro [new] thread_local_var_method_get
   #
@@ -70,28 +72,28 @@ module Concurrent
   # the current thread will ever see that change.
   #
   # @!macro thread_safe_variable_comparison
-  #
+  #
   # @example
   #   v = ThreadLocalVar.new(14)
   #   v.value #=> 14
   #   v.value = 2
   #   v.value #=> 2
-  #
+  #
   # @example
   #   v = ThreadLocalVar.new(14)
-  #
+  #
   #   t1 = Thread.new do
   #     v.value #=> 14
   #     v.value = 1
   #     v.value #=> 1
   #   end
-  #
+  #
   #   t2 = Thread.new do
   #     v.value #=> 14
   #     v.value = 2
   #     v.value #=> 2
   #   end
-  #
+  #
   #   v.value #=> 14
   #
   # @see https://docs.oracle.com/javase/7/docs/api/java/lang/ThreadLocal.html Java ThreadLocal

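The new default_block parameter documented above pairs with the default_for change in ruby_thread_local_var.rb: instead of every thread sharing one default object, each thread can lazily build its own. A sketch of the intended usage, assuming the block is simply passed to the constructor:

    require 'concurrent'

    buffer = Concurrent::ThreadLocalVar.new { [] } # each thread gets its own Array

    t1 = Thread.new { buffer.value << :a; buffer.value }
    t2 = Thread.new { buffer.value << :b; buffer.value }
    [t1.value, t2.value] #=> [[:a], [:b]]
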
data/lib/concurrent/atomic_reference/jruby+truffle.rb

@@ -1 +1,2 @@
-require '
+require 'atomic'
+require 'concurrent/atomic_reference/rbx'

data/lib/concurrent/configuration.rb

@@ -10,11 +10,43 @@ require 'concurrent/utility/processor_counter'
 module Concurrent
   extend Concern::Logging
 
-  autoload :Options,
-  autoload :TimerSet,
+  autoload :Options,            'concurrent/options'
+  autoload :TimerSet,           'concurrent/executor/timer_set'
   autoload :ThreadPoolExecutor, 'concurrent/executor/thread_pool_executor'
 
   # @return [Logger] Logger with provided level and output.
+  def self.create_simple_logger(level = Logger::FATAL, output = $stderr)
+    # TODO (pitr-ch 24-Dec-2016): figure out why it had to be replaced, stdlogger was deadlocking
+    lambda do |severity, progname, message = nil, &block|
+      return false if severity < level
+
+      message           = block ? block.call : message
+      formatted_message = case message
+                          when String
+                            message
+                          when Exception
+                            format "%s (%s)\n%s",
+                                   message.message, message.class, (message.backtrace || []).join("\n")
+                          else
+                            message.inspect
+                          end
+
+      output.print format "[%s] %5s -- %s: %s\n",
+                          Time.now.strftime('%Y-%m-%d %H:%M:%S.%L'),
+                          Logger::SEV_LABEL[severity],
+                          progname,
+                          formatted_message
+      true
+    end
+  end
+
+  # Use logger created by #create_simple_logger to log concurrent-ruby messages.
+  def self.use_simple_logger(level = Logger::FATAL, output = $stderr)
+    Concurrent.global_logger = create_simple_logger level, output
+  end
+
+  # @return [Logger] Logger with provided level and output.
+  # @deprecated
   def self.create_stdlib_logger(level = Logger::FATAL, output = $stderr)
     logger = Logger.new(output)
     logger.level = level
@@ -24,32 +56,35 @@ module Concurrent
                             msg
                           when Exception
                             format "%s (%s)\n%s",
-
+                                   msg.message, msg.class, (msg.backtrace || []).join("\n")
                           else
                             msg.inspect
                           end
       format "[%s] %5s -- %s: %s\n",
-
-
-
-
+             datetime.strftime('%Y-%m-%d %H:%M:%S.%L'),
+             severity,
+             progname,
+             formatted_message
     end
 
     lambda do |loglevel, progname, message = nil, &block|
-
+      logger.add loglevel, message, progname, &block
     end
   end
 
   # Use logger created by #create_stdlib_logger to log concurrent-ruby messages.
+  # @deprecated
   def self.use_stdlib_logger(level = Logger::FATAL, output = $stderr)
     Concurrent.global_logger = create_stdlib_logger level, output
   end
 
+  # TODO (pitr-ch 27-Dec-2016): remove deadlocking stdlib_logger methods
+
   # Suppresses all output when used for logging.
   NULL_LOGGER = lambda { |level, progname, message = nil, &block| }
 
   # @!visibility private
-  GLOBAL_LOGGER = AtomicReference.new(
+  GLOBAL_LOGGER = AtomicReference.new(create_simple_logger(Logger::WARN))
   private_constant :GLOBAL_LOGGER
 
   def self.global_logger

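create_simple_logger sidesteps the deadlock noted in the TODO by formatting and printing inline rather than going through Logger#add. Installing it is one call; the DEBUG level and $stdout target below are example choices, not new defaults:

    require 'concurrent'
    require 'logger'

    Concurrent.use_simple_logger(Logger::DEBUG, $stdout)

    # or install it explicitly:
    Concurrent.global_logger = Concurrent.create_simple_logger(Logger::WARN)
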
@@ -131,23 +166,23 @@ module Concurrent
 
   def self.new_fast_executor(opts = {})
     FixedThreadPool.new(
-
-
-
-
-
+        [2, Concurrent.processor_count].max,
+        auto_terminate: opts.fetch(:auto_terminate, true),
+        idletime:       60, # 1 minute
+        max_queue:      0, # unlimited
+        fallback_policy: :abort # shouldn't matter -- 0 max queue
     )
   end
 
   def self.new_io_executor(opts = {})
     ThreadPoolExecutor.new(
-
-
-
-
-
-
-
+        min_threads: [2, Concurrent.processor_count].max,
+        max_threads: ThreadPoolExecutor::DEFAULT_MAX_POOL_SIZE,
+        # max_threads: 1000,
+        auto_terminate: opts.fetch(:auto_terminate, true),
+        idletime:    60, # 1 minute
+        max_queue:   0, # unlimited
+        fallback_policy: :abort # shouldn't matter -- 0 max queue
     )
   end
 end

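These two factories build the gem's global executors, so the arguments restored above (at least two threads, a 60-second idle timeout, an unbounded queue) are what work posted without an explicit pool receives. A small usage sketch; routing via the :fast and :io symbols is assumed to resolve to these pools:

    require 'concurrent'

    io_task   = Concurrent::Future.execute(executor: :io)   { sleep 0.1; :done }      # blocking work
    fast_task = Concurrent::Future.execute(executor: :fast) { (1..1_000).reduce(:+) } # CPU-bound work

    io_task.value   #=> :done
    fast_task.value #=> 500500
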