concurrent-ruby 1.2.2 → 1.3.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +50 -2
  3. data/Gemfile +6 -6
  4. data/README.md +7 -3
  5. data/Rakefile +50 -25
  6. data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java +1 -1
  7. data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java +1 -1
  8. data/lib/concurrent-ruby/concurrent/agent.rb +2 -2
  9. data/lib/concurrent-ruby/concurrent/array.rb +3 -3
  10. data/lib/concurrent-ruby/concurrent/async.rb +1 -1
  11. data/lib/concurrent-ruby/concurrent/atom.rb +1 -1
  12. data/lib/concurrent-ruby/concurrent/atomic/lock_local_var.rb +1 -0
  13. data/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb +23 -20
  14. data/lib/concurrent-ruby/concurrent/collection/ruby_timeout_queue.rb +55 -0
  15. data/lib/concurrent-ruby/concurrent/collection/timeout_queue.rb +18 -0
  16. data/lib/concurrent-ruby/concurrent/concern/logging.rb +17 -12
  17. data/lib/concurrent-ruby/concurrent/concurrent_ruby.jar +0 -0
  18. data/lib/concurrent-ruby/concurrent/delay.rb +1 -1
  19. data/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb +6 -4
  20. data/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb +5 -7
  21. data/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb +7 -0
  22. data/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb +2 -0
  23. data/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb +61 -31
  24. data/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb +1 -1
  25. data/lib/concurrent-ruby/concurrent/executor/timer_set.rb +10 -3
  26. data/lib/concurrent-ruby/concurrent/executors.rb +0 -1
  27. data/lib/concurrent-ruby/concurrent/hash.rb +5 -3
  28. data/lib/concurrent-ruby/concurrent/map.rb +3 -3
  29. data/lib/concurrent-ruby/concurrent/mvar.rb +4 -4
  30. data/lib/concurrent-ruby/concurrent/promise.rb +2 -2
  31. data/lib/concurrent-ruby/concurrent/promises.rb +33 -23
  32. data/lib/concurrent-ruby/concurrent/scheduled_task.rb +1 -1
  33. data/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb +1 -1
  34. data/lib/concurrent-ruby/concurrent/synchronization/object.rb +1 -1
  35. data/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb +1 -1
  36. data/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb +1 -1
  37. data/lib/concurrent-ruby/concurrent/timer_task.rb +65 -10
  38. data/lib/concurrent-ruby/concurrent/utility/processor_counter.rb +116 -6
  39. data/lib/concurrent-ruby/concurrent/version.rb +1 -1
  40. metadata +5 -5
  41. data/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb +0 -927
  42. data/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb +0 -81
@@ -1,4 +1,3 @@
1
- require 'logger'
2
1
  require 'concurrent/atomic/atomic_reference'
3
2
 
4
3
  module Concurrent
@@ -8,10 +7,12 @@ module Concurrent
8
7
  #
9
8
  # @!visibility private
10
9
  module Logging
11
- include Logger::Severity
10
+ # The same as Logger::Severity but we copy it here to avoid a dependency on the logger gem just for these 7 constants
11
+ DEBUG, INFO, WARN, ERROR, FATAL, UNKNOWN = 0, 1, 2, 3, 4, 5
12
+ SEV_LABEL = %w[DEBUG INFO WARN ERROR FATAL ANY].freeze
12
13
 
13
14
  # Logs through {Concurrent.global_logger}, it can be overridden by setting @logger
14
- # @param [Integer] level one of Logger::Severity constants
15
+ # @param [Integer] level one of Concurrent::Concern::Logging constants
15
16
  # @param [String] progname e.g. a path of an Actor
16
17
  # @param [String, nil] message when nil block is used to generate the message
17
18
  # @yieldreturn [String] a message
@@ -23,7 +24,7 @@ module Concurrent
23
24
  end
24
25
  logger.call level, progname, message, &block
25
26
  rescue => error
26
- $stderr.puts "`Concurrent.configuration.logger` failed to log #{[level, progname, message, block]}\n" +
27
+ $stderr.puts "`Concurrent.global_logger` failed to log #{[level, progname, message, block]}\n" +
27
28
  "#{error.message} (#{error.class})\n#{error.backtrace.join "\n"}"
28
29
  end
29
30
  end
@@ -33,8 +34,10 @@ end
33
34
  module Concurrent
34
35
  extend Concern::Logging
35
36
 
36
- # @return [Logger] Logger with provided level and output.
37
- def self.create_simple_logger(level = Logger::FATAL, output = $stderr)
37
+ # Create a simple logger with provided level and output.
38
+ def self.create_simple_logger(level = :FATAL, output = $stderr)
39
+ level = Concern::Logging.const_get(level) unless level.is_a?(Integer)
40
+
38
41
  # TODO (pitr-ch 24-Dec-2016): figure out why it had to be replaced, stdlogger was deadlocking
39
42
  lambda do |severity, progname, message = nil, &block|
40
43
  return false if severity < level
@@ -52,7 +55,7 @@ module Concurrent
52
55
 
53
56
  output.print format "[%s] %5s -- %s: %s\n",
54
57
  Time.now.strftime('%Y-%m-%d %H:%M:%S.%L'),
55
- Logger::SEV_LABEL[severity],
58
+ Concern::Logging::SEV_LABEL[severity],
56
59
  progname,
57
60
  formatted_message
58
61
  true
@@ -60,13 +63,15 @@ module Concurrent
60
63
  end
61
64
 
62
65
  # Use logger created by #create_simple_logger to log concurrent-ruby messages.
63
- def self.use_simple_logger(level = Logger::FATAL, output = $stderr)
66
+ def self.use_simple_logger(level = :FATAL, output = $stderr)
64
67
  Concurrent.global_logger = create_simple_logger level, output
65
68
  end
66
69
 
67
- # @return [Logger] Logger with provided level and output.
70
+ # Create a stdlib logger with provided level and output.
71
+ # If you use this deprecated method you might need to add logger to your Gemfile to avoid warnings from Ruby 3.3.5+.
68
72
  # @deprecated
69
- def self.create_stdlib_logger(level = Logger::FATAL, output = $stderr)
73
+ def self.create_stdlib_logger(level = :FATAL, output = $stderr)
74
+ require 'logger'
70
75
  logger = Logger.new(output)
71
76
  logger.level = level
72
77
  logger.formatter = lambda do |severity, datetime, progname, msg|
@@ -93,7 +98,7 @@ module Concurrent
93
98
 
94
99
  # Use logger created by #create_stdlib_logger to log concurrent-ruby messages.
95
100
  # @deprecated
96
- def self.use_stdlib_logger(level = Logger::FATAL, output = $stderr)
101
+ def self.use_stdlib_logger(level = :FATAL, output = $stderr)
97
102
  Concurrent.global_logger = create_stdlib_logger level, output
98
103
  end
99
104
 
@@ -103,7 +108,7 @@ module Concurrent
103
108
  NULL_LOGGER = lambda { |level, progname, message = nil, &block| }
104
109
 
105
110
  # @!visibility private
106
- GLOBAL_LOGGER = AtomicReference.new(create_simple_logger(Logger::WARN))
111
+ GLOBAL_LOGGER = AtomicReference.new(create_simple_logger(:WARN))
107
112
  private_constant :GLOBAL_LOGGER
108
113
 
109
114
  def self.global_logger
@@ -19,7 +19,7 @@ module Concurrent
19
19
  #
20
20
  # When a `Delay` is created its state is set to `pending`. The value and
21
21
  # reason are both `nil`. The first time the `#value` method is called the
22
- # enclosed opration will be run and the calling thread will block. Other
22
+ # enclosed operation will be run and the calling thread will block. Other
23
23
  # threads attempting to call `#value` will block as well. Once the operation
24
24
  # is complete the *value* will be set to the result of the operation or the
25
25
  # *reason* will be set to the raised exception, as appropriate. All threads
@@ -39,6 +39,10 @@ module Concurrent
39
39
  # The number of tasks that have been completed by the pool since construction.
40
40
  # @return [Integer] The number of tasks that have been completed by the pool since construction.
41
41
 
42
+ # @!macro thread_pool_executor_method_active_count
43
+ # The number of threads that are actively executing tasks.
44
+ # @return [Integer] The number of threads that are actively executing tasks.
45
+
42
46
  # @!macro thread_pool_executor_attr_reader_idletime
43
47
  # The number of seconds that a thread may be idle before being reclaimed.
44
48
  # @return [Integer] The number of seconds that a thread may be idle before being reclaimed.
@@ -77,10 +81,8 @@ module Concurrent
77
81
  # What is being pruned is controlled by the min_threads and idletime
78
82
  # parameters passed at pool creation time
79
83
  #
80
- # This is a no-op on some pool implementation (e.g. the Java one). The Ruby
81
- # pool will auto-prune each time a new job is posted. You will need to call
82
- # this method explicitely in case your application post jobs in bursts (a
83
- # lot of jobs and then nothing for long periods)
84
+ # This is a no-op on all pool implementations as they prune themselves
85
+ # automatically, and has been deprecated.
84
86
 
85
87
  # @!macro thread_pool_executor_public_api
86
88
  #
@@ -46,6 +46,7 @@ if Concurrent.on_jruby?
46
46
  def kill
47
47
  synchronize do
48
48
  @executor.shutdownNow
49
+ wait_for_termination
49
50
  nil
50
51
  end
51
52
  end
@@ -57,15 +58,11 @@ if Concurrent.on_jruby?
57
58
  end
58
59
 
59
60
  def ns_shuttingdown?
60
- if @executor.respond_to? :isTerminating
61
- @executor.isTerminating
62
- else
63
- false
64
- end
61
+ @executor.isShutdown && !@executor.isTerminated
65
62
  end
66
63
 
67
64
  def ns_shutdown?
68
- @executor.isShutdown || @executor.isTerminated
65
+ @executor.isTerminated
69
66
  end
70
67
 
71
68
  class Job
@@ -88,10 +85,11 @@ if Concurrent.on_jruby?
88
85
 
89
86
  def initialize(daemonize = true)
90
87
  @daemonize = daemonize
88
+ @java_thread_factory = java.util.concurrent.Executors.defaultThreadFactory
91
89
  end
92
90
 
93
91
  def newThread(runnable)
94
- thread = java.util.concurrent.Executors.defaultThreadFactory().newThread(runnable)
92
+ thread = @java_thread_factory.newThread(runnable)
95
93
  thread.setDaemon(@daemonize)
96
94
  return thread
97
95
  end
@@ -8,6 +8,7 @@ if Concurrent.on_jruby?
8
8
  # @!macro thread_pool_options
9
9
  # @!visibility private
10
10
  class JavaThreadPoolExecutor < JavaExecutorService
11
+ include Concern::Deprecation
11
12
 
12
13
  # @!macro thread_pool_executor_constant_default_max_pool_size
13
14
  DEFAULT_MAX_POOL_SIZE = java.lang.Integer::MAX_VALUE # 2147483647
@@ -73,6 +74,11 @@ if Concurrent.on_jruby?
73
74
  @executor.getCompletedTaskCount
74
75
  end
75
76
 
77
+ # @!macro thread_pool_executor_method_active_count
78
+ def active_count
79
+ @executor.getActiveCount
80
+ end
81
+
76
82
  # @!macro thread_pool_executor_attr_reader_idletime
77
83
  def idletime
78
84
  @executor.getKeepAliveTime(java.util.concurrent.TimeUnit::SECONDS)
@@ -95,6 +101,7 @@ if Concurrent.on_jruby?
95
101
 
96
102
  # @!macro thread_pool_executor_method_prune_pool
97
103
  def prune_pool
104
+ deprecated "#prune_pool has no effect and will be removed in the next release."
98
105
  end
99
106
 
100
107
  private
@@ -1,4 +1,5 @@
1
1
  require 'concurrent/executor/ruby_thread_pool_executor'
2
+ require 'concurrent/executor/serial_executor_service'
2
3
 
3
4
  module Concurrent
4
5
 
@@ -6,6 +7,7 @@ module Concurrent
6
7
  # @!macro abstract_executor_service_public_api
7
8
  # @!visibility private
8
9
  class RubySingleThreadExecutor < RubyThreadPoolExecutor
10
+ include SerialExecutorService
9
11
 
10
12
  # @!macro single_thread_executor_method_initialize
11
13
  def initialize(opts = {})
@@ -3,6 +3,7 @@ require 'concurrent/atomic/event'
3
3
  require 'concurrent/concern/logging'
4
4
  require 'concurrent/executor/ruby_executor_service'
5
5
  require 'concurrent/utility/monotonic_time'
6
+ require 'concurrent/collection/timeout_queue'
6
7
 
7
8
  module Concurrent
8
9
 
@@ -10,6 +11,7 @@ module Concurrent
10
11
  # @!macro thread_pool_options
11
12
  # @!visibility private
12
13
  class RubyThreadPoolExecutor < RubyExecutorService
14
+ include Concern::Deprecation
13
15
 
14
16
  # @!macro thread_pool_executor_constant_default_max_pool_size
15
17
  DEFAULT_MAX_POOL_SIZE = 2_147_483_647 # java.lang.Integer::MAX_VALUE
@@ -61,6 +63,13 @@ module Concurrent
61
63
  synchronize { @completed_task_count }
62
64
  end
63
65
 
66
+ # @!macro thread_pool_executor_method_active_count
67
+ def active_count
68
+ synchronize do
69
+ @pool.length - @ready.length
70
+ end
71
+ end
72
+
64
73
  # @!macro executor_service_method_can_overflow_question
65
74
  def can_overflow?
66
75
  synchronize { ns_limited_queue? }
@@ -87,9 +96,28 @@ module Concurrent
87
96
  end
88
97
  end
89
98
 
99
+ # removes the worker if it can be pruned
100
+ #
101
+ # @return [true, false] if the worker was pruned
102
+ #
90
103
  # @!visibility private
91
- def remove_busy_worker(worker)
92
- synchronize { ns_remove_busy_worker worker }
104
+ def prune_worker(worker)
105
+ synchronize do
106
+ if ns_prunable_capacity > 0
107
+ remove_worker worker
108
+ true
109
+ else
110
+ false
111
+ end
112
+ end
113
+ end
114
+
115
+ # @!visibility private
116
+ def remove_worker(worker)
117
+ synchronize do
118
+ ns_remove_ready_worker worker
119
+ ns_remove_busy_worker worker
120
+ end
93
121
  end
94
122
 
95
123
  # @!visibility private
@@ -109,7 +137,7 @@ module Concurrent
109
137
 
110
138
  # @!macro thread_pool_executor_method_prune_pool
111
139
  def prune_pool
112
- synchronize { ns_prune_pool }
140
+ deprecated "#prune_pool has no effect and will be removed in next the release, see https://github.com/ruby-concurrency/concurrent-ruby/pull/1082."
113
141
  end
114
142
 
115
143
  private
@@ -139,9 +167,6 @@ module Concurrent
139
167
  @largest_length = 0
140
168
  @workers_counter = 0
141
169
  @ruby_pid = $$ # detects if Ruby has forked
142
-
143
- @gc_interval = opts.fetch(:gc_interval, @idletime / 2.0).to_i # undocumented
144
- @next_gc_time = Concurrent.monotonic_time + @gc_interval
145
170
  end
146
171
 
147
172
  # @!visibility private
@@ -155,12 +180,10 @@ module Concurrent
155
180
 
156
181
  if ns_assign_worker(*args, &task) || ns_enqueue(*args, &task)
157
182
  @scheduled_task_count += 1
183
+ nil
158
184
  else
159
- return fallback_action(*args, &task)
185
+ fallback_action(*args, &task)
160
186
  end
161
-
162
- ns_prune_pool if @next_gc_time < Concurrent.monotonic_time
163
- nil
164
187
  end
165
188
 
166
189
  # @!visibility private
@@ -211,7 +234,7 @@ module Concurrent
211
234
  # @!visibility private
212
235
  def ns_enqueue(*args, &task)
213
236
  return false if @synchronous
214
-
237
+
215
238
  if !ns_limited_queue? || @queue.size < @max_queue
216
239
  @queue << [task, args]
217
240
  true
@@ -258,7 +281,7 @@ module Concurrent
258
281
  end
259
282
  end
260
283
 
261
- # removes a worker which is not in not tracked in @ready
284
+ # removes a worker which is not tracked in @ready
262
285
  #
263
286
  # @!visibility private
264
287
  def ns_remove_busy_worker(worker)
@@ -267,25 +290,27 @@ module Concurrent
267
290
  true
268
291
  end
269
292
 
270
- # try oldest worker if it is idle for enough time, it's returned back at the start
271
- #
272
293
  # @!visibility private
273
- def ns_prune_pool
274
- now = Concurrent.monotonic_time
275
- stopped_workers = 0
276
- while !@ready.empty? && (@pool.size - stopped_workers > @min_length)
277
- worker, last_message = @ready.first
278
- if now - last_message > self.idletime
279
- stopped_workers += 1
280
- @ready.shift
281
- worker << :stop
282
- else break
283
- end
294
+ def ns_remove_ready_worker(worker)
295
+ if index = @ready.index { |rw, _| rw == worker }
296
+ @ready.delete_at(index)
284
297
  end
298
+ true
299
+ end
285
300
 
286
- @next_gc_time = Concurrent.monotonic_time + @gc_interval
301
+ # @return [Integer] number of excess idle workers which can be removed without
302
+ # going below min_length, or all workers if not running
303
+ #
304
+ # @!visibility private
305
+ def ns_prunable_capacity
306
+ if running?
307
+ [@pool.size - @min_length, @ready.size].min
308
+ else
309
+ @pool.size
310
+ end
287
311
  end
288
312
 
313
+ # @!visibility private
289
314
  def ns_reset_if_forked
290
315
  if $$ != @ruby_pid
291
316
  @queue.clear
@@ -305,7 +330,7 @@ module Concurrent
305
330
 
306
331
  def initialize(pool, id)
307
332
  # instance variables accessed only under pool's lock so no need to sync here again
308
- @queue = Queue.new
333
+ @queue = Collection::TimeoutQueue.new
309
334
  @pool = pool
310
335
  @thread = create_worker @queue, pool, pool.idletime
311
336
 
@@ -331,17 +356,22 @@ module Concurrent
331
356
  def create_worker(queue, pool, idletime)
332
357
  Thread.new(queue, pool, idletime) do |my_queue, my_pool, my_idletime|
333
358
  catch(:stop) do
334
- loop do
359
+ prunable = true
335
360
 
336
- case message = my_queue.pop
361
+ loop do
362
+ timeout = prunable && my_pool.running? ? my_idletime : nil
363
+ case message = my_queue.pop(timeout: timeout)
364
+ when nil
365
+ throw :stop if my_pool.prune_worker(self)
366
+ prunable = false
337
367
  when :stop
338
- my_pool.remove_busy_worker(self)
368
+ my_pool.remove_worker(self)
339
369
  throw :stop
340
-
341
370
  else
342
371
  task, args = message
343
372
  run_task my_pool, task, args
344
373
  my_pool.ready_worker(self, Concurrent.monotonic_time)
374
+ prunable = true
345
375
  end
346
376
  end
347
377
  end
@@ -27,7 +27,7 @@ module Concurrent
27
27
  # is received. This pattern has several issues. The thread itself is highly
28
28
  # susceptible to errors during processing. Also, the thread itself must be
29
29
  # constantly monitored and restarted should it die. `SingleThreadExecutor`
30
- # encapsulates all these bahaviors. The task processor is highly resilient
30
+ # encapsulates all these behaviors. The task processor is highly resilient
31
31
  # to errors from within tasks. Also, should the thread die it will
32
32
  # automatically be restarted.
33
33
  #
@@ -3,7 +3,7 @@ require 'concurrent/atomic/event'
3
3
  require 'concurrent/collection/non_concurrent_priority_queue'
4
4
  require 'concurrent/executor/executor_service'
5
5
  require 'concurrent/executor/single_thread_executor'
6
-
6
+ require 'concurrent/errors'
7
7
  require 'concurrent/options'
8
8
 
9
9
  module Concurrent
@@ -61,6 +61,7 @@ module Concurrent
61
61
  # not running.
62
62
  def kill
63
63
  shutdown
64
+ @timer_executor.kill
64
65
  end
65
66
 
66
67
  private :<<
@@ -122,7 +123,9 @@ module Concurrent
122
123
  def ns_shutdown_execution
123
124
  ns_reset_if_forked
124
125
  @queue.clear
125
- @timer_executor.kill
126
+ @condition.set
127
+ @condition.reset
128
+ @timer_executor.shutdown
126
129
  stopped_event.set
127
130
  end
128
131
 
@@ -162,7 +165,11 @@ module Concurrent
162
165
  # queue now must have the same pop time, or a closer one, as
163
166
  # when we peeked).
164
167
  task = synchronize { @queue.pop }
165
- task.executor.post { task.process_task }
168
+ begin
169
+ task.executor.post { task.process_task }
170
+ rescue RejectedExecutionError
171
+ # ignore and continue
172
+ end
166
173
  else
167
174
  @condition.wait([diff, 60].min)
168
175
  end
@@ -10,7 +10,6 @@ require 'concurrent/executor/java_thread_pool_executor'
10
10
  require 'concurrent/executor/ruby_executor_service'
11
11
  require 'concurrent/executor/ruby_single_thread_executor'
12
12
  require 'concurrent/executor/ruby_thread_pool_executor'
13
- require 'concurrent/executor/cached_thread_pool'
14
13
  require 'concurrent/executor/safe_task_executor'
15
14
  require 'concurrent/executor/serial_executor_service'
16
15
  require 'concurrent/executor/serialized_execution'
@@ -15,9 +15,11 @@ module Concurrent
15
15
  # @!macro internal_implementation_note
16
16
  HashImplementation = case
17
17
  when Concurrent.on_cruby?
18
- # Hash is thread-safe in practice because CRuby runs
19
- # threads one at a time and does not do context
20
- # switching during the execution of C functions.
18
+ # Hash is not fully thread-safe on CRuby, see
19
+ # https://bugs.ruby-lang.org/issues/19237
20
+ # https://github.com/ruby/ruby/commit/ffd52412ab
21
+ # https://github.com/ruby-concurrency/concurrent-ruby/issues/929
22
+ # So we will need to add synchronization here (similar to Concurrent::Map).
21
23
  ::Hash
22
24
 
23
25
  when Concurrent.on_jruby?
@@ -20,8 +20,8 @@ module Concurrent
20
20
  require 'concurrent/collection/map/truffleruby_map_backend'
21
21
  TruffleRubyMapBackend
22
22
  else
23
- require 'concurrent/collection/map/atomic_reference_map_backend'
24
- AtomicReferenceMapBackend
23
+ require 'concurrent/collection/map/synchronized_map_backend'
24
+ SynchronizedMapBackend
25
25
  end
26
26
  else
27
27
  warn 'Concurrent::Map: unsupported Ruby engine, using a fully synchronized Concurrent::Map implementation'
@@ -148,7 +148,7 @@ module Concurrent
148
148
  if value = super # non-falsy value is an existing mapping, return it right away
149
149
  value
150
150
  # re-check is done with get_or_default(key, NULL) instead of a simple !key?(key) in order to avoid a race condition, whereby by the time the current thread gets to the key?(key) call
151
- # a key => value mapping might have already been created by a different thread (key?(key) would then return true, this elsif branch wouldn't be taken and an incorrent +nil+ value
151
+ # a key => value mapping might have already been created by a different thread (key?(key) would then return true, this elsif branch wouldn't be taken and an incorrect +nil+ value
152
152
  # would be returned)
153
153
  # note: nil == value check is not technically necessary
154
154
  elsif @default_proc && nil == value && NULL == (value = get_or_default(key, NULL))
@@ -9,7 +9,7 @@ module Concurrent
9
9
  # queue of length one, or a special kind of mutable variable.
10
10
  #
11
11
  # On top of the fundamental `#put` and `#take` operations, we also provide a
12
- # `#mutate` that is atomic with respect to operations on the same instance.
12
+ # `#modify` that is atomic with respect to operations on the same instance.
13
13
  # These operations all support timeouts.
14
14
  #
15
15
  # We also support non-blocking operations `#try_put!` and `#try_take!`, a
@@ -87,7 +87,7 @@ module Concurrent
87
87
  @mutex.synchronize do
88
88
  wait_for_full(timeout)
89
89
 
90
- # if we timeoud out we'll still be empty
90
+ # If we timed out we'll still be empty
91
91
  if unlocked_full?
92
92
  yield @value
93
93
  else
@@ -116,10 +116,10 @@ module Concurrent
116
116
  end
117
117
 
118
118
  # Atomically `take`, yield the value to a block for transformation, and then
119
- # `put` the transformed value. Returns the transformed value. A timeout can
119
+ # `put` the transformed value. Returns the pre-transform value. A timeout can
120
120
  # be set to limit the time spent blocked, in which case it returns `TIMEOUT`
121
121
  # if the time is exceeded.
122
- # @return [Object] the transformed value, or `TIMEOUT`
122
+ # @return [Object] the pre-transform value, or `TIMEOUT`
123
123
  def modify(timeout = nil)
124
124
  raise ArgumentError.new('no block given') unless block_given?
125
125
 
@@ -103,7 +103,7 @@ module Concurrent
103
103
  # - if parent is *rejected* the child will be *pending* (but will ultimately be *rejected*)
104
104
  #
105
105
  # Promises are executed asynchronously from the main thread. By the time a
106
- # child Promise finishes intialization it may be in a different state than its
106
+ # child Promise finishes initialization it may be in a different state than its
107
107
  # parent (by the time a child is created its parent may have completed
108
108
  # execution and changed state). Despite being asynchronous, however, the order
109
109
  # of execution of Promise objects in a chain (or tree) is strictly defined.
@@ -167,7 +167,7 @@ module Concurrent
167
167
  # c2 = p.then(-> reason { raise 'Boom!' })
168
168
  #
169
169
  # c1.wait.state #=> :fulfilled
170
- # c1.value #=> 45
170
+ # c1.value #=> 42
171
171
  # c2.wait.state #=> :rejected
172
172
  # c2.reason #=> #<RuntimeError: Boom!>
173
173
  # ```