concurrent-ruby 1.2.0 → 1.2.3

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: f224c72d89a1bd079ad1086c9ed28a1d4d6432c718b9e0624c9a32afc8500cc8
- data.tar.gz: dfb811189c8e7eeb2ea1f17fc1e680657dffe767c77bb09312e85a4f29f2bd60
+ metadata.gz: 1c6483d36c3c75859c9b38b6ba2ba13577a0bb89b3b725a032d43f3543797099
+ data.tar.gz: e94ac3e1ba23db29a7bf025736e34a4027ba7a137ae08b493c78ce3756803792
  SHA512:
- metadata.gz: 807b1aac5e1bdee569df0b9e38fff1ed339f2c8569cd0a7ab565cbbecb23ffc9c6cf968e457e2acecf3abd187ac43da16dc2fb91716c912e78c08aa122dcaf0e
- data.tar.gz: 07c37140aa1417ad32daa0de92f4c0883c7a96c1c53fba9aa09e34598632b3ad274283034064df41181e59964d0c79e41d083f17912e8e121c4f2d774c988b81
+ metadata.gz: dc0353e60638a409bf891d663626bd2c1c5f05206c08036326d63e239bfe0ea7e1e3480982c75fe44f9d416a7f76a66a2a2aeef78fc60084ed7ff94c7d3181d9
+ data.tar.gz: 24bdd3a040528c05061ec15b4057184be96fa933f69f3321755e8583ba8e6175beec48c36f1671cc3dad7e413588c9601c7f0e5b503b3de947dff63c2ba76b1e
data/CHANGELOG.md CHANGED
@@ -1,5 +1,18 @@
  ## Current

+ ## Release v1.2.3 (16 Jan 2024)
+
+ * See [the GitHub release](https://github.com/ruby-concurrency/concurrent-ruby/releases/tag/v1.2.3) for details.
+
+ ## Release v1.2.2 (24 Feb 2023)
+
+ * (#993) Fix arguments passed to `Concurrent::Map`'s `default_proc`.
+
+ ## Release v1.2.1 (24 Feb 2023)
+
+ * (#990) Add missing `require 'fiber'` for `FiberLocalVar`.
+ * (#989) Optimize `Concurrent::Map#[]` on CRuby by letting the backing Hash handle the `default_proc`.
+
  ## Release v1.2.0 (23 Jan 2023)

  * (#962) Fix ReentrantReadWriteLock to use the same granularity for locals as for Mutex it uses.
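To put the `Concurrent::Map` entries above (#989, #993) in context: since v1.2.2 the `default_proc` given to `Concurrent::Map.new` is invoked with the map itself and the missing key, mirroring `Hash#default_proc`. A minimal usage sketch (not part of the diff; behavior as described by these changelog entries):

```ruby
require 'concurrent'

# The block behaves like Hash#default_proc: it receives the map and the key.
cache = Concurrent::Map.new { |map, key| map[key] = key.to_s.upcase }

cache[:ruby]      # => "RUBY" (computed and stored by the default_proc)
cache.key?(:ruby) # => true
cache[:ruby]      # => "RUBY" (served from the map, no recomputation)

# For an atomic check-then-store across threads, prefer compute_if_absent.
cache.compute_if_absent(:go) { 'GO' }
```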
data/Gemfile CHANGED
@@ -12,7 +12,7 @@ gem 'concurrent-ruby-ext', Concurrent::VERSION, options.merge(platform: :mri)

  group :development do
  gem 'rake', '~> 13.0'
- gem 'rake-compiler', '~> 1.0', '>= 1.0.7'
+ gem 'rake-compiler', '~> 1.0', '>= 1.0.7', '!= 1.2.4'
  gem 'rake-compiler-dock', '~> 1.0'
  gem 'pry', '~> 0.11', platforms: :mri
  end
data/README.md CHANGED
@@ -375,6 +375,8 @@ best practice is to depend on `concurrent-ruby` and let users to decide if they
  * [Benoit Daloze](https://github.com/eregon)
  * [Matthew Draper](https://github.com/matthewd)
  * [Rafael França](https://github.com/rafaelfranca)
+ * [Charles Oliver Nutter](https://github.com/headius)
+ * [Ben Sheldon](https://github.com/bensheldon)
  * [Samuel Williams](https://github.com/ioquatix)

  ### Special Thanks to
data/Rakefile CHANGED
@@ -271,6 +271,7 @@ namespace :release do
  task :publish => ['publish:ask', 'publish:tag', 'publish:rubygems', 'publish:post_steps']

  namespace :publish do
+ publish_base = true
  publish_edge = false

  task :ask do
@@ -289,8 +290,8 @@
  desc '** tag HEAD with current version and push to github'
  task :tag => :ask do
  Dir.chdir(__dir__) do
- sh "git tag v#{Concurrent::VERSION}"
- sh "git push origin v#{Concurrent::VERSION}"
+ sh "git tag v#{Concurrent::VERSION}" if publish_base
+ sh "git push origin v#{Concurrent::VERSION}" if publish_base
  sh "git tag edge-v#{Concurrent::EDGE_VERSION}" if publish_edge
  sh "git push origin edge-v#{Concurrent::EDGE_VERSION}" if publish_edge
  end
@@ -299,11 +300,11 @@
  desc '** push all *.gem files to rubygems'
  task :rubygems => :ask do
  Dir.chdir(__dir__) do
- sh "gem push pkg/concurrent-ruby-#{Concurrent::VERSION}.gem"
+ sh "gem push pkg/concurrent-ruby-#{Concurrent::VERSION}.gem" if publish_base
  sh "gem push pkg/concurrent-ruby-edge-#{Concurrent::EDGE_VERSION}.gem" if publish_edge
- sh "gem push pkg/concurrent-ruby-ext-#{Concurrent::VERSION}.gem"
- sh "gem push pkg/concurrent-ruby-ext-#{Concurrent::VERSION}-x64-mingw32.gem"
- sh "gem push pkg/concurrent-ruby-ext-#{Concurrent::VERSION}-x86-mingw32.gem"
+ sh "gem push pkg/concurrent-ruby-ext-#{Concurrent::VERSION}.gem" if publish_base
+ sh "gem push pkg/concurrent-ruby-ext-#{Concurrent::VERSION}-x64-mingw32.gem" if publish_base
+ sh "gem push pkg/concurrent-ruby-ext-#{Concurrent::VERSION}-x86-mingw32.gem" if publish_base
  end
  end

@@ -21,9 +21,9 @@ module Concurrent
  # @!macro internal_implementation_note
  ArrayImplementation = case
  when Concurrent.on_cruby?
- # Array is thread-safe in practice because CRuby runs
- # threads one at a time and does not do context
- # switching during the execution of C functions.
+ # Array is not fully thread-safe on CRuby, see
+ # https://github.com/ruby-concurrency/concurrent-ruby/issues/929
+ # So we will need to add synchronization here
  ::Array

  when Concurrent.on_jruby?
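The hunk above only rewrites the comment explaining why CRuby's `::Array` is still used as the backing store; `Concurrent::Array` keeps the same interface on every engine. A small usage sketch (not from the diff):

```ruby
require 'concurrent'

# Concurrent::Array behaves like ::Array but is meant to be shared across
# threads (on JRuby/TruffleRuby it is a synchronized implementation; on
# CRuby it is currently backed by ::Array, as the comment above explains).
results = Concurrent::Array.new

threads = 8.times.map do |i|
  Thread.new { results << i * i }
end
threads.each(&:join)

puts results.size # => 8
```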
@@ -1,3 +1,4 @@
+ require 'fiber'
  require 'concurrent/utility/engine'
  require 'concurrent/constants'

@@ -9,8 +9,8 @@ module Concurrent
  # @!visibility private
  class MriMapBackend < NonConcurrentMapBackend

- def initialize(options = nil)
- super(options)
+ def initialize(options = nil, &default_proc)
+ super(options, &default_proc)
  @write_lock = Mutex.new
  end

@@ -12,8 +12,10 @@ module Concurrent
  # directly without calling each other. This is important because of the
  # SynchronizedMapBackend which uses a non-reentrant mutex for performance
  # reasons.
- def initialize(options = nil)
- @backend = {}
+ def initialize(options = nil, &default_proc)
+ validate_options_hash!(options) if options.kind_of?(::Hash)
+ set_backend(default_proc)
+ @default_proc = default_proc
  end

  def [](key)
@@ -55,7 +57,7 @@
  end

  def compute(key)
- store_computed_value(key, yield(@backend[key]))
+ store_computed_value(key, yield(get_or_default(key, nil)))
  end

  def merge_pair(key, value)
@@ -67,7 +69,7 @@
  end

  def get_and_set(key, value)
- stored_value = @backend[key]
+ stored_value = get_or_default(key, nil)
  @backend[key] = value
  stored_value
  end
@@ -109,13 +111,19 @@
  @backend.fetch(key, default_value)
  end

- alias_method :_get, :[]
- alias_method :_set, :[]=
- private :_get, :_set
  private
+
+ def set_backend(default_proc)
+ if default_proc
+ @backend = ::Hash.new { |_h, key| default_proc.call(self, key) }
+ else
+ @backend = {}
+ end
+ end
+
  def initialize_copy(other)
  super
- @backend = {}
+ set_backend(@default_proc)
  self
  end

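The `set_backend` helper above implements the #989 optimization: instead of checking for a default on every `Concurrent::Map#[]` call, the backing `::Hash` is built with a block that forwards misses to the user's `default_proc`. The same delegation pattern in plain Ruby, as a sketch (names are illustrative, not from the gem):

```ruby
# Plain-Ruby sketch of the delegation used by set_backend above.
# `owner` stands in for the Concurrent::Map instance (`self` in the gem code).
owner = Object.new
default_proc = ->(map, key) { "default for #{key.inspect}" }

backend =
  if default_proc
    # Hash#default_proc does the miss handling; the block only translates the
    # receiver so the user's proc sees the owning map, not the raw Hash.
    ::Hash.new { |_h, key| default_proc.call(owner, key) }
  else
    {}
  end

backend[:missing]      # => "default for :missing"
backend.key?(:missing) # => false (the proc did not store anything)
```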
@@ -8,74 +8,77 @@ module Concurrent
  # @!visibility private
  class SynchronizedMapBackend < NonConcurrentMapBackend

- require 'mutex_m'
- include Mutex_m
- # WARNING: Mutex_m is a non-reentrant lock, so the synchronized methods are
- # not allowed to call each other.
+ def initialize(*args, &block)
+ super
+
+ # WARNING: Mutex is a non-reentrant lock, so the synchronized methods are
+ # not allowed to call each other.
+ @mutex = Mutex.new
+ end

  def [](key)
- synchronize { super }
+ @mutex.synchronize { super }
  end

  def []=(key, value)
- synchronize { super }
+ @mutex.synchronize { super }
  end

  def compute_if_absent(key)
- synchronize { super }
+ @mutex.synchronize { super }
  end

  def compute_if_present(key)
- synchronize { super }
+ @mutex.synchronize { super }
  end

  def compute(key)
- synchronize { super }
+ @mutex.synchronize { super }
  end

  def merge_pair(key, value)
- synchronize { super }
+ @mutex.synchronize { super }
  end

  def replace_pair(key, old_value, new_value)
- synchronize { super }
+ @mutex.synchronize { super }
  end

  def replace_if_exists(key, new_value)
- synchronize { super }
+ @mutex.synchronize { super }
  end

  def get_and_set(key, value)
- synchronize { super }
+ @mutex.synchronize { super }
  end

  def key?(key)
- synchronize { super }
+ @mutex.synchronize { super }
  end

  def delete(key)
- synchronize { super }
+ @mutex.synchronize { super }
  end

  def delete_pair(key, value)
- synchronize { super }
+ @mutex.synchronize { super }
  end

  def clear
- synchronize { super }
+ @mutex.synchronize { super }
  end

  def size
- synchronize { super }
+ @mutex.synchronize { super }
  end

  def get_or_default(key, default_value)
- synchronize { super }
+ @mutex.synchronize { super }
  end

  private
  def dupped_backend
- synchronize { super }
+ @mutex.synchronize { super }
  end
  end
  end
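The warning carried over from the `Mutex_m` version still applies: Ruby's `Mutex` is non-reentrant, so the synchronized methods above must call `super` rather than each other. A quick illustration of why (plain Ruby, not from the gem):

```ruby
# Re-acquiring a Mutex that the current thread already holds raises
# ThreadError ("deadlock; recursive locking"), which is what would happen
# if one synchronized method called another one on the same backend.
mutex = Mutex.new

begin
  mutex.synchronize do
    mutex.synchronize { :never_reached }
  end
rescue ThreadError => e
  puts "re-entrant lock attempt: #{e.message}"
end
```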
@@ -39,6 +39,10 @@ module Concurrent
  # The number of tasks that have been completed by the pool since construction.
  # @return [Integer] The number of tasks that have been completed by the pool since construction.

+ # @!macro thread_pool_executor_method_active_count
+ # The number of threads that are actively executing tasks.
+ # @return [Integer] The number of threads that are actively executing tasks.
+
  # @!macro thread_pool_executor_attr_reader_idletime
  # The number of seconds that a thread may be idle before being reclaimed.
  # @return [Integer] The number of seconds that a thread may be idle before being reclaimed.
@@ -88,10 +88,11 @@ if Concurrent.on_jruby?

  def initialize(daemonize = true)
  @daemonize = daemonize
+ @java_thread_factory = java.util.concurrent.Executors.defaultThreadFactory
  end

  def newThread(runnable)
- thread = java.util.concurrent.Executors.defaultThreadFactory().newThread(runnable)
+ thread = @java_thread_factory.newThread(runnable)
  thread.setDaemon(@daemonize)
  return thread
  end
@@ -73,6 +73,11 @@ if Concurrent.on_jruby?
  @executor.getCompletedTaskCount
  end

+ # @!macro thread_pool_executor_method_active_count
+ def active_count
+ @executor.getActiveCount
+ end
+
  # @!macro thread_pool_executor_attr_reader_idletime
  def idletime
  @executor.getKeepAliveTime(java.util.concurrent.TimeUnit::SECONDS)
@@ -61,6 +61,13 @@ module Concurrent
  synchronize { @completed_task_count }
  end

+ # @!macro thread_pool_executor_method_active_count
+ def active_count
+ synchronize do
+ @pool.length - @ready.length
+ end
+ end
+
  # @!macro executor_service_method_can_overflow_question
  def can_overflow?
  synchronize { ns_limited_queue? }
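With the additions above, both the JRuby and pure-Ruby thread pools report how many workers are currently busy. A hedged usage sketch (`FixedThreadPool` is used for brevity; it is a preconfigured `ThreadPoolExecutor`, so the exact numbers depend on timing):

```ruby
require 'concurrent'

pool = Concurrent::FixedThreadPool.new(2)

4.times { pool.post { sleep 0.2 } }
sleep 0.05 # give the workers a moment to pick up tasks

puts pool.active_count # typically 2: threads currently running tasks
puts pool.queue_length # typically 2: tasks still waiting in the queue

pool.shutdown
pool.wait_for_termination
```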
@@ -3,7 +3,7 @@ require 'concurrent/atomic/event'
  require 'concurrent/collection/non_concurrent_priority_queue'
  require 'concurrent/executor/executor_service'
  require 'concurrent/executor/single_thread_executor'
-
+ require 'concurrent/errors'
  require 'concurrent/options'

  module Concurrent
@@ -162,7 +162,11 @@ module Concurrent
  # queue now must have the same pop time, or a closer one, as
  # when we peeked).
  task = synchronize { @queue.pop }
- task.executor.post { task.process_task }
+ begin
+ task.executor.post { task.process_task }
+ rescue RejectedExecutionError
+ # ignore and continue
+ end
  else
  @condition.wait([diff, 60].min)
  end
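The `rescue RejectedExecutionError` above keeps the shared `TimerSet` thread alive when a scheduled task's target executor has already been shut down by the time the delay elapses. A hedged sketch of the situation it guards against (option names as I understand the public API; verify against the gem's docs):

```ruby
require 'concurrent'

executor = Concurrent::FixedThreadPool.new(1)

# Schedule work on an executor that will be gone before the delay elapses.
task = Concurrent::ScheduledTask.execute(1, executor: executor) { :done }

executor.shutdown
executor.wait_for_termination

sleep 1.5
# The post to the dead executor is rejected (its default fallback policy is
# :abort). With the change above the TimerSet swallows the
# RejectedExecutionError instead of letting its worker thread die; the task
# itself is simply never executed.
puts task.state # likely still pending
```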
@@ -15,9 +15,11 @@ module Concurrent
  # @!macro internal_implementation_note
  HashImplementation = case
  when Concurrent.on_cruby?
- # Hash is thread-safe in practice because CRuby runs
- # threads one at a time and does not do context
- # switching during the execution of C functions.
+ # Hash is not fully thread-safe on CRuby, see
+ # https://bugs.ruby-lang.org/issues/19237
+ # https://github.com/ruby/ruby/commit/ffd52412ab
+ # https://github.com/ruby-concurrency/concurrent-ruby/issues/929
+ # So we will need to add synchronization here (similar to Concurrent::Map).
  ::Hash

  when Concurrent.on_jruby?
@@ -20,8 +20,8 @@ module Concurrent
  require 'concurrent/collection/map/truffleruby_map_backend'
  TruffleRubyMapBackend
  else
- require 'concurrent/collection/map/atomic_reference_map_backend'
- AtomicReferenceMapBackend
+ require 'concurrent/collection/map/synchronized_map_backend'
+ SynchronizedMapBackend
  end
  else
  warn 'Concurrent::Map: unsupported Ruby engine, using a fully synchronized Concurrent::Map implementation'
@@ -46,6 +46,12 @@ module Concurrent
  # @note Atomic methods taking a block do not allow the `self` instance
  # to be used within the block. Doing so will cause a deadlock.

+ # @!method []=(key, value)
+ # Set a value with key
+ # @param [Object] key
+ # @param [Object] value
+ # @return [Object] the new value
+
  # @!method compute_if_absent(key)
  # Compute and store new value for key if the key is absent.
  # @param [Object] key
@@ -119,41 +125,38 @@ module Concurrent
  # @return [true, false] true if deleted
  # @!macro map.atomic_method

- #
- def initialize(options = nil, &block)
- if options.kind_of?(::Hash)
- validate_options_hash!(options)
- else
- options = nil
- end
+ # NonConcurrentMapBackend handles default_proc natively
+ unless defined?(Collection::NonConcurrentMapBackend) and self < Collection::NonConcurrentMapBackend

- super(options)
- @default_proc = block
- end
+ # @param [Hash, nil] options options to set the :initial_capacity or :load_factor. Ignored on some Rubies.
+ # @param [Proc] default_proc Optional block to compute the default value if the key is not set, like `Hash#default_proc`
+ def initialize(options = nil, &default_proc)
+ if options.kind_of?(::Hash)
+ validate_options_hash!(options)
+ else
+ options = nil
+ end

- # Get a value with key
- # @param [Object] key
- # @return [Object] the value
- def [](key)
- if value = super # non-falsy value is an existing mapping, return it right away
- value
- # re-check is done with get_or_default(key, NULL) instead of a simple !key?(key) in order to avoid a race condition, whereby by the time the current thread gets to the key?(key) call
- # a key => value mapping might have already been created by a different thread (key?(key) would then return true, this elsif branch wouldn't be taken and an incorrent +nil+ value
- # would be returned)
- # note: nil == value check is not technically necessary
- elsif @default_proc && nil == value && NULL == (value = get_or_default(key, NULL))
- @default_proc.call(self, key)
- else
- value
+ super(options)
+ @default_proc = default_proc
  end
- end

- # Set a value with key
- # @param [Object] key
- # @param [Object] value
- # @return [Object] the new value
- def []=(key, value)
- super
+ # Get a value with key
+ # @param [Object] key
+ # @return [Object] the value
+ def [](key)
+ if value = super # non-falsy value is an existing mapping, return it right away
+ value
+ # re-check is done with get_or_default(key, NULL) instead of a simple !key?(key) in order to avoid a race condition, whereby by the time the current thread gets to the key?(key) call
+ # a key => value mapping might have already been created by a different thread (key?(key) would then return true, this elsif branch wouldn't be taken and an incorrent +nil+ value
+ # would be returned)
+ # note: nil == value check is not technically necessary
+ elsif @default_proc && nil == value && NULL == (value = get_or_default(key, NULL))
+ @default_proc.call(self, key)
+ else
+ value
+ end
+ end
  end

  alias_method :get, :[]
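The long comment in `#[]` above describes a classic check-then-act race: a plain `!key?(key)` re-check can be invalidated by another thread between the first read and the check, so the method re-reads with a sentinel value (`NULL`) that no caller can ever store. The same idea in plain Ruby (a sketch; `NULL` here stands in for the gem's internal constant, and `map` is anything responding to `get_or_default`, e.g. `Concurrent::Map`):

```ruby
# Sketch of the sentinel re-check used by Concurrent::Map#[] above.
NULL = Object.new # unique object that no caller can ever store as a value

def fetch_with_default(map, key, default_proc)
  value = map.get_or_default(key, NULL)
  if NULL.equal?(value)
    # The key was genuinely absent at the moment of this read, so fall back
    # to the default_proc. A separate !map.key?(key) check could race:
    # another thread may insert the key right after our first read, making
    # key? return true and causing a stale nil to be returned instead.
    default_proc.call(map, key)
  else
    value # existing mapping, possibly nil or false
  end
end

# fetch_with_default(Concurrent::Map.new, :missing, ->(m, k) { 0 }) # => 0
```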
@@ -5,6 +5,7 @@ require 'concurrent/collection/lock_free_stack'
  require 'concurrent/configuration'
  require 'concurrent/errors'
  require 'concurrent/re_include'
+ require 'concurrent/utility/monotonic_time'

  module Concurrent

@@ -22,7 +23,7 @@ module Concurrent
  #
  # @!macro promises.param.args
  # @param [Object] args arguments which are passed to the task when it's executed.
- # (It might be prepended with other arguments, see the @yeild section).
+ # (It might be prepended with other arguments, see the @yield section).
  #
  # @!macro promises.shortcut.on
  # Shortcut of {#$0_on} with default `:io` executor supplied.
@@ -63,8 +64,8 @@
  resolvable_event_on default_executor
  end

- # Created resolvable event, user is responsible for resolving the event once by
- # {Promises::ResolvableEvent#resolve}.
+ # Creates a resolvable event, user is responsible for resolving the event once
+ # by calling {Promises::ResolvableEvent#resolve}.
  #
  # @!macro promises.param.default_executor
  # @return [ResolvableEvent]
@@ -94,7 +95,7 @@
  future_on(default_executor, *args, &task)
  end

- # Constructs new Future which will be resolved after block is evaluated on default executor.
+ # Constructs a new Future which will be resolved after block is evaluated on default executor.
  # Evaluation begins immediately.
  #
  # @!macro promises.param.default_executor
@@ -106,7 +107,7 @@
  ImmediateEventPromise.new(default_executor).future.then(*args, &task)
  end

- # Creates resolved future with will be either fulfilled with the given value or rejection with
+ # Creates a resolved future with will be either fulfilled with the given value or rejected with
  # the given reason.
  #
  # @param [true, false] fulfilled
@@ -118,7 +119,7 @@
  ImmediateFuturePromise.new(default_executor, fulfilled, value, reason).future
  end

- # Creates resolved future with will be fulfilled with the given value.
+ # Creates a resolved future which will be fulfilled with the given value.
  #
  # @!macro promises.param.default_executor
  # @param [Object] value
@@ -127,7 +128,7 @@
  resolved_future true, value, nil, default_executor
  end

- # Creates resolved future with will be rejected with the given reason.
+ # Creates a resolved future which will be rejected with the given reason.
  #
  # @!macro promises.param.default_executor
  # @param [Object] reason
@@ -190,7 +191,7 @@
  delay_on default_executor, *args, &task
  end

- # Creates new event or future which is resolved only after it is touched,
+ # Creates a new event or future which is resolved only after it is touched,
  # see {Concurrent::AbstractEventFuture#touch}.
  #
  # @!macro promises.param.default_executor
@@ -214,7 +215,7 @@
  schedule_on default_executor, intended_time, *args, &task
  end

- # Creates new event or future which is resolved in intended_time.
+ # Creates a new event or future which is resolved in intended_time.
  #
  # @!macro promises.param.default_executor
  # @!macro promises.param.intended_time
@@ -240,8 +241,8 @@
  zip_futures_on default_executor, *futures_and_or_events
  end

- # Creates new future which is resolved after all futures_and_or_events are resolved.
- # Its value is array of zipped future values. Its reason is array of reasons for rejection.
+ # Creates a new future which is resolved after all futures_and_or_events are resolved.
+ # Its value is an array of zipped future values. Its reason is an array of reasons for rejection.
  # If there is an error it rejects.
  # @!macro promises.event-conversion
  # If event is supplied, which does not have value and can be only resolved, it's
@@ -262,7 +263,7 @@
  zip_events_on default_executor, *futures_and_or_events
  end

- # Creates new event which is resolved after all futures_and_or_events are resolved.
+ # Creates a new event which is resolved after all futures_and_or_events are resolved.
  # (Future is resolved when fulfilled or rejected.)
  #
  # @!macro promises.param.default_executor
@@ -280,8 +281,8 @@

  alias_method :any, :any_resolved_future

- # Creates new future which is resolved after first futures_and_or_events is resolved.
- # Its result equals result of the first resolved future.
+ # Creates a new future which is resolved after the first futures_and_or_events is resolved.
+ # Its result equals the result of the first resolved future.
  # @!macro promises.any-touch
  # If resolved it does not propagate {Concurrent::AbstractEventFuture#touch}, leaving delayed
  # futures un-executed if they are not required any more.
@@ -300,9 +301,9 @@
  any_fulfilled_future_on default_executor, *futures_and_or_events
  end

- # Creates new future which is resolved after first of futures_and_or_events is fulfilled.
- # Its result equals result of the first resolved future or if all futures_and_or_events reject,
- # it has reason of the last resolved future.
+ # Creates a new future which is resolved after the first futures_and_or_events is fulfilled.
+ # Its result equals the result of the first resolved future or if all futures_and_or_events reject,
+ # it has reason of the last rejected future.
  # @!macro promises.any-touch
  # @!macro promises.event-conversion
  #
@@ -319,7 +320,7 @@
  any_event_on default_executor, *futures_and_or_events
  end

- # Creates new event which becomes resolved after first of the futures_and_or_events resolves.
+ # Creates a new event which becomes resolved after the first futures_and_or_events resolves.
  # @!macro promises.any-touch
  #
  # @!macro promises.param.default_executor
@@ -611,7 +612,7 @@
  # @yieldparam [Object] value
  # @yieldparam [Object] reason
  def chain_on(executor, *args, &task)
- ChainPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future
+ ChainPromise.new_blocked_by1(self, executor, executor, args, &task).future
  end

  # @return [String] Short string representation.
@@ -772,8 +773,17 @@ module Concurrent
  @Lock.synchronize do
  @Waiters.increment
  begin
- unless resolved?
- @Condition.wait @Lock, timeout
+ if timeout
+ start = Concurrent.monotonic_time
+ until resolved?
+ break if @Condition.wait(@Lock, timeout) == nil # nil means timeout
+ timeout -= (Concurrent.monotonic_time - start)
+ break if timeout <= 0
+ end
+ else
+ until resolved?
+ @Condition.wait(@Lock, timeout)
+ end
  end
  ensure
  # JRuby may raise ConcurrencyError
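The rewritten branch above handles spurious wakeups: a condition-variable wait can return before the event is actually resolved, so the remaining timeout is recomputed from a monotonic clock and the wait is retried. The general pattern in plain Ruby (a sketch of the technique, not the gem's code):

```ruby
# Wait for a condition with an overall deadline, tolerating spurious wakeups.
# Same ingredients as the diff above: a Mutex, a ConditionVariable and a
# monotonic clock (wall-clock time can jump; the monotonic clock cannot).
def wait_until(lock, condition, timeout)
  deadline = Process.clock_gettime(Process::CLOCK_MONOTONIC) + timeout
  lock.synchronize do
    until yield # the predicate, e.g. `resolved?`
      remaining = deadline - Process.clock_gettime(Process::CLOCK_MONOTONIC)
      return false if remaining <= 0
      condition.wait(lock, remaining) # may wake early; the loop re-checks
    end
    true
  end
end

# usage: wait_until(Mutex.new, ConditionVariable.new, 5) { work_done? }
```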
@@ -1034,7 +1044,7 @@ module Concurrent
  # @return [Future]
  # @yield [value, *args] to the task.
  def then_on(executor, *args, &task)
- ThenPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future
+ ThenPromise.new_blocked_by1(self, executor, executor, args, &task).future
  end

  # @!macro promises.shortcut.on
@@ -1052,7 +1062,7 @@
  # @return [Future]
  # @yield [reason, *args] to the task.
  def rescue_on(executor, *args, &task)
- RescuePromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future
+ RescuePromise.new_blocked_by1(self, executor, executor, args, &task).future
  end

  # @!macro promises.method.zip