concurrent-ruby 1.1.7 → 1.1.10

Files changed (39)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +31 -0
  3. data/Gemfile +2 -7
  4. data/README.md +40 -20
  5. data/Rakefile +31 -34
  6. data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java +0 -0
  7. data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java +52 -22
  8. data/lib/concurrent-ruby/concurrent/async.rb +1 -0
  9. data/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb +1 -0
  10. data/lib/concurrent-ruby/concurrent/atomic/event.rb +2 -2
  11. data/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb +18 -2
  12. data/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb +4 -6
  13. data/lib/concurrent-ruby/concurrent/atomic/ruby_thread_local_var.rb +4 -1
  14. data/lib/concurrent-ruby/concurrent/atomic/semaphore.rb +26 -5
  15. data/lib/concurrent-ruby/concurrent/collection/map/truffleruby_map_backend.rb +14 -0
  16. data/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb +11 -1
  17. data/lib/concurrent-ruby/concurrent/concurrent_ruby.jar +0 -0
  18. data/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb +16 -13
  19. data/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb +13 -3
  20. data/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb +1 -1
  21. data/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb +4 -0
  22. data/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb +10 -4
  23. data/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb +26 -37
  24. data/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb +5 -5
  25. data/lib/concurrent-ruby/concurrent/map.rb +13 -4
  26. data/lib/concurrent-ruby/concurrent/promise.rb +1 -0
  27. data/lib/concurrent-ruby/concurrent/scheduled_task.rb +29 -16
  28. data/lib/concurrent-ruby/concurrent/set.rb +14 -6
  29. data/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb +1 -3
  30. data/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb +12 -0
  31. data/lib/concurrent-ruby/concurrent/synchronization/rbx_lockable_object.rb +6 -0
  32. data/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb +26 -1
  33. data/lib/concurrent-ruby/concurrent/timer_task.rb +11 -33
  34. data/lib/concurrent-ruby/concurrent/tvar.rb +19 -56
  35. data/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb +67 -35
  36. data/lib/concurrent-ruby/concurrent/utility/processor_counter.rb +2 -35
  37. data/lib/concurrent-ruby/concurrent/version.rb +1 -1
  38. data/lib/concurrent-ruby/concurrent-ruby.rb +5 -1
  39. metadata +8 -7
data/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb
@@ -30,7 +30,7 @@ module Concurrent
   if @queue[k] == item
   swap(k, @length)
   @length -= 1
- sink(k)
+ sink(k) || swim(k)
   @queue.pop
   else
   k += 1
@@ -126,12 +126,17 @@ module Concurrent
   #
   # @!visibility private
   def sink(k)
+ success = false
+
   while (j = (2 * k)) <= @length do
   j += 1 if j < @length && ! ordered?(j, j+1)
   break if ordered?(k, j)
   swap(k, j)
+ success = true
   k = j
   end
+
+ success
   end

   # Percolate up to maintain heap invariant.
@@ -140,10 +145,15 @@ module Concurrent
   #
   # @!visibility private
   def swim(k)
+ success = false
+
   while k > 1 && ! ordered?(k/2, k) do
   swap(k, k/2)
   k = k/2
+ success = true
   end
+
+ success
   end
   end
   end
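Why `sink(k) || swim(k)`? Deleting an arbitrary element replaces it with the last heap entry, and that replacement can violate the heap property in either direction, so if sinking does not move it (now signalled by the boolean return value), it must swim instead. A self-contained sketch of the same idea on a toy min-heap; the class below is hypothetical and only mirrors how the gem's RubyNonConcurrentPriorityQueue behaves:

    # Toy 1-indexed min-heap, only to illustrate the delete fix above.
    class ToyMinHeap
      def initialize
        @a = [nil] # index 0 unused; the heap lives in indices 1..size
      end

      def push(x)
        @a << x
        swim(@a.size - 1)
      end

      def delete(x)
        k = @a.index(x)
        return false unless k
        @a[k], @a[@a.size - 1] = @a[@a.size - 1], @a[k]
        @a.pop
        # The element moved into slot k came from the bottom of the heap, yet it
        # can still be smaller than its new parent, so sinking alone is not
        # enough -- mirror the fix: sink, and if nothing moved, swim.
        sink(k) || swim(k) if k < @a.size
        true
      end

      private

      def sink(k) # returns true only if the element actually moved down
        moved = false
        while (j = 2 * k) < @a.size
          j += 1 if j + 1 < @a.size && @a[j + 1] < @a[j]
          break if @a[k] <= @a[j]
          @a[k], @a[j] = @a[j], @a[k]
          moved = true
          k = j
        end
        moved
      end

      def swim(k) # returns true only if the element actually moved up
        moved = false
        while k > 1 && @a[k] < @a[k / 2]
          @a[k], @a[k / 2] = @a[k / 2], @a[k]
          k /= 2
          moved = true
        end
        moved
      end
    end

    heap = ToyMinHeap.new
    [5, 9, 7, 12, 11, 8].each { |n| heap.push(n) }
    heap.delete(12) # 8 replaces 12 and must swim up past 9; sink alone would miss it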
data/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb
@@ -75,28 +75,31 @@ module Concurrent

   private

- # Handler which executes the `fallback_policy` once the queue size
- # reaches `max_queue`.
+ # Returns an action which executes the `fallback_policy` once the queue
+ # size reaches `max_queue`. The reason for the indirection of an action
+ # is so that the work can be deferred outside of synchronization.
   #
   # @param [Array] args the arguments to the task which is being handled.
   #
   # @!visibility private
- def handle_fallback(*args)
+ def fallback_action(*args)
   case fallback_policy
   when :abort
- raise RejectedExecutionError
+ lambda { raise RejectedExecutionError }
   when :discard
- false
+ lambda { false }
   when :caller_runs
- begin
- yield(*args)
- rescue => ex
- # let it fail
- log DEBUG, ex
- end
- true
+ lambda {
+ begin
+ yield(*args)
+ rescue => ex
+ # let it fail
+ log DEBUG, ex
+ end
+ true
+ }
   else
- fail "Unknown fallback policy #{fallback_policy}"
+ lambda { fail "Unknown fallback policy #{fallback_policy}" }
   end
   end

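The refactoring only changes when the fallback work runs (after the lock is released), not what the three policies do. A minimal sketch of how an application observes them, assuming a deliberately tiny Concurrent::ThreadPoolExecutor with a bounded queue:

    require 'concurrent'

    pool = Concurrent::ThreadPoolExecutor.new(
      min_threads:     1,
      max_threads:     1,
      max_queue:       1,
      fallback_policy: :abort   # also :discard or :caller_runs
    )

    10.times do |i|
      begin
        pool.post { sleep 0.1; puts "job #{i}" }
      rescue Concurrent::RejectedExecutionError
        # :abort raises once the queue is full; :discard would silently return
        # false, and :caller_runs would run the block in the posting thread.
        puts "job #{i} rejected"
      end
    end

    pool.shutdown
    pool.wait_for_termination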
data/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb
@@ -71,9 +71,16 @@ module Concurrent
   # @return [Integer] Number of tasks that may be enqueued before reaching `max_queue` and rejecting
   # new tasks. A value of -1 indicates that the queue may grow without bound.

-
-
-
+ # @!macro thread_pool_executor_method_prune_pool
+ # Prune the thread pool of unneeded threads
+ #
+ # What is being pruned is controlled by the min_threads and idletime
+ # parameters passed at pool creation time
+ #
+ # This is a no-op on some pool implementations (e.g. the Java one). The Ruby
+ # pool will auto-prune each time a new job is posted. You will need to call
+ # this method explicitly if your application posts jobs in bursts (a
+ # lot of jobs and then nothing for long periods)

   # @!macro thread_pool_executor_public_api
   #
@@ -111,6 +118,9 @@ module Concurrent
   #
   # @!method can_overflow?
   # @!macro executor_service_method_can_overflow_question
+ #
+ # @!method prune_pool
+ # @!macro thread_pool_executor_method_prune_pool

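Because the Ruby pool only auto-prunes when new work is posted, a bursty workload can hold idle threads indefinitely unless the application calls the new prune_pool itself. A minimal sketch, assuming a ThreadPoolExecutor created with a short idletime:

    require 'concurrent'

    pool = Concurrent::ThreadPoolExecutor.new(
      min_threads: 2,
      max_threads: 20,
      idletime:    5   # seconds an idle worker may live before it can be pruned
    )

    # A burst of jobs grows the pool toward max_threads...
    1_000.times { pool.post { sleep 0.01 } }

    # ...then nothing is posted for a long while. The Ruby pool only auto-prunes
    # when new work arrives, so reclaim the idle threads explicitly:
    sleep 6          # wait until the workers have been idle longer than idletime
    pool.prune_pool

    pool.length      # trends back toward min_threads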
data/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb
@@ -20,7 +20,7 @@ if Concurrent.on_jruby?

   def post(*args, &task)
   raise ArgumentError.new('no block given') unless block_given?
- return handle_fallback(*args, &task) unless running?
+ return fallback_action(*args, &task).call unless running?
   @executor.submit Job.new(args, task)
   true
   rescue Java::JavaUtilConcurrent::RejectedExecutionException
data/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb
@@ -93,6 +93,10 @@ if Concurrent.on_jruby?
   super && !@executor.isTerminating
   end

+ # @!macro thread_pool_executor_method_prune_pool
+ def prune_pool
+ end
+
   private

   def ns_initialize(opts)
data/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb
@@ -16,10 +16,16 @@ module Concurrent

   def post(*args, &task)
   raise ArgumentError.new('no block given') unless block_given?
- synchronize do
- # If the executor is shut down, reject this task
- return handle_fallback(*args, &task) unless running?
- ns_execute(*args, &task)
+ deferred_action = synchronize {
+ if running?
+ ns_execute(*args, &task)
+ else
+ fallback_action(*args, &task)
+ end
+ }
+ if deferred_action
+ deferred_action.call
+ else
   true
   end
   end
data/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb
@@ -93,13 +93,8 @@ module Concurrent
   end

   # @!visibility private
- def ready_worker(worker)
- synchronize { ns_ready_worker worker }
- end
-
- # @!visibility private
- def worker_not_old_enough(worker)
- synchronize { ns_worker_not_old_enough worker }
+ def ready_worker(worker, last_message)
+ synchronize { ns_ready_worker worker, last_message }
   end

   # @!visibility private
@@ -112,6 +107,11 @@ module Concurrent
   synchronize { @completed_task_count += 1 }
   end

+ # @!macro thread_pool_executor_method_prune_pool
+ def prune_pool
+ synchronize { ns_prune_pool }
+ end
+
   private

   # @!visibility private
@@ -156,10 +156,11 @@ module Concurrent
   if ns_assign_worker(*args, &task) || ns_enqueue(*args, &task)
   @scheduled_task_count += 1
   else
- handle_fallback(*args, &task)
+ return fallback_action(*args, &task)
   end

   ns_prune_pool if @next_gc_time < Concurrent.monotonic_time
+ nil
   end

   # @!visibility private
@@ -192,7 +193,7 @@ module Concurrent
   # @!visibility private
   def ns_assign_worker(*args, &task)
   # keep growing if the pool is not at the minimum yet
- worker = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker
+ worker, _ = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker
   if worker
   worker << [task, args]
   true
@@ -223,7 +224,7 @@
   def ns_worker_died(worker)
   ns_remove_busy_worker worker
   replacement_worker = ns_add_busy_worker
- ns_ready_worker replacement_worker, false if replacement_worker
+ ns_ready_worker replacement_worker, Concurrent.monotonic_time, false if replacement_worker
   end

   # creates new worker which has to receive work to do after it's added
@@ -242,29 +243,21 @@ module Concurrent
   # handle ready worker, giving it new job or assigning back to @ready
   #
   # @!visibility private
- def ns_ready_worker(worker, success = true)
+ def ns_ready_worker(worker, last_message, success = true)
   task_and_args = @queue.shift
   if task_and_args
   worker << task_and_args
   else
   # stop workers when !running?, do not return them to @ready
   if running?
- @ready.push(worker)
+ raise unless last_message
+ @ready.push([worker, last_message])
   else
   worker.stop
   end
   end
   end

- # returns back worker to @ready which was not idle for enough time
- #
- # @!visibility private
- def ns_worker_not_old_enough(worker)
- # let's put workers coming from idle_test back to the start (as the oldest worker)
- @ready.unshift(worker)
- true
- end
-
   # removes a worker which is not tracked in @ready
   #
   # @!visibility private
@@ -278,10 +271,17 @@ module Concurrent
   #
   # @!visibility private
   def ns_prune_pool
- return if @pool.size <= @min_length
-
- last_used = @ready.shift
- last_used << :idle_test if last_used
+ now = Concurrent.monotonic_time
+ stopped_workers = 0
+ while !@ready.empty? && (@pool.size - stopped_workers > @min_length)
+ worker, last_message = @ready.first
+ if now - last_message > self.idletime
+ stopped_workers += 1
+ @ready.shift
+ worker << :stop
+ else break
+ end
+ end

   @next_gc_time = Concurrent.monotonic_time + @gc_interval
   end
@@ -330,19 +330,10 @@ module Concurrent

   def create_worker(queue, pool, idletime)
   Thread.new(queue, pool, idletime) do |my_queue, my_pool, my_idletime|
- last_message = Concurrent.monotonic_time
   catch(:stop) do
   loop do

   case message = my_queue.pop
- when :idle_test
- if (Concurrent.monotonic_time - last_message) > my_idletime
- my_pool.remove_busy_worker(self)
- throw :stop
- else
- my_pool.worker_not_old_enough(self)
- end
-
   when :stop
   my_pool.remove_busy_worker(self)
   throw :stop
@@ -350,9 +341,7 @@ module Concurrent
   else
   task, args = message
   run_task my_pool, task, args
- last_message = Concurrent.monotonic_time
-
- my_pool.ready_worker(self)
+ my_pool.ready_worker(self, Concurrent.monotonic_time)
   end
   end
   end
data/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb
@@ -16,10 +16,10 @@ module Concurrent

   # @return [Array]
   def execute(*args)
- synchronize do
- success = false
- value = reason = nil
+ success = true
+ value = reason = nil

+ synchronize do
   begin
   value = @task.call(*args)
   success = true
@@ -27,9 +27,9 @@ module Concurrent
   reason = ex
   success = false
   end
-
- [success, value, reason]
   end
+
+ [success, value, reason]
   end
   end
   end
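The change above only narrows the synchronized region; the observable result of SafeTaskExecutor#execute, the [success, value, reason] triple, is unchanged. A quick sketch of that API:

    require 'concurrent'

    ok = Concurrent::SafeTaskExecutor.new(proc { 21 * 2 }).execute
    ok   #=> [true, 42, nil]

    boom = Concurrent::SafeTaskExecutor.new(proc { raise ArgumentError, 'nope' }).execute
    boom #=> [false, nil, #<ArgumentError: nope>]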
data/lib/concurrent-ruby/concurrent/map.rb
@@ -15,7 +15,10 @@ module Concurrent
   when Concurrent.on_cruby?
   require 'concurrent/collection/map/mri_map_backend'
   MriMapBackend
- when Concurrent.on_rbx? || Concurrent.on_truffleruby?
+ when Concurrent.on_truffleruby? && defined?(::TruffleRuby::ConcurrentMap)
+ require 'concurrent/collection/map/truffleruby_map_backend'
+ TruffleRubyMapBackend
+ when Concurrent.on_truffleruby? || Concurrent.on_rbx?
   require 'concurrent/collection/map/atomic_reference_map_backend'
   AtomicReferenceMapBackend
   else
@@ -114,7 +117,7 @@ module Concurrent
   # @return [true, false] true if deleted
   # @!macro map.atomic_method

-
+ #
   def initialize(options = nil, &block)
   if options.kind_of?(::Hash)
   validate_options_hash!(options)
@@ -143,8 +146,15 @@ module Concurrent
   end
   end

+ # Set a value with key
+ # @param [Object] key
+ # @param [Object] value
+ # @return [Object] the new value
+ def []=(key, value)
+ super
+ end
+
   alias_method :get, :[]
- # TODO (pitr-ch 30-Oct-2018): doc
   alias_method :put, :[]=

   # Get a value with key, or default_value when key is absent,
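The newly documented []= writer behaves like a thread-safe Hash store, and put/get remain aliases of the bracket methods. A quick sketch of the Map read/write API:

    require 'concurrent'

    map = Concurrent::Map.new

    map[:answer] = 42            # documented writer, same as #put
    map.put(:greeting, 'hello')  # alias of []=

    map[:answer]                 #=> 42
    map.get(:greeting)           #=> "hello" (alias of [])
    map.fetch(:missing, :none)   #=> :none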
@@ -271,7 +281,6 @@ module Concurrent
   each_pair { |k, v| return k if v == value }
   nil
   end unless method_defined?(:key)
- alias_method :index, :key if RUBY_VERSION < '1.9'

   # Is map empty?
   # @return [true, false]
data/lib/concurrent-ruby/concurrent/promise.rb
@@ -250,6 +250,7 @@ module Concurrent
   realize(@promise_body)
   end
   else
+ compare_and_set_state(:pending, :unscheduled)
   @parent.execute
   self
   end
data/lib/concurrent-ruby/concurrent/scheduled_task.rb
@@ -58,29 +58,42 @@ module Concurrent
   # @example Basic usage
   #
   # require 'concurrent'
- # require 'thread' # for Queue
- # require 'open-uri' # for open(uri)
+ # require 'csv'
+ # require 'open-uri'
   #
   # class Ticker
- # def get_year_end_closing(symbol, year)
- # uri = "http://ichart.finance.yahoo.com/table.csv?s=#{symbol}&a=11&b=01&c=#{year}&d=11&e=31&f=#{year}&g=m"
- # data = open(uri) {|f| f.collect{|line| line.strip } }
- # data[1].split(',')[4].to_f
- # end
+ # def get_year_end_closing(symbol, year, api_key)
+ # uri = "https://www.alphavantage.co/query?function=TIME_SERIES_MONTHLY&symbol=#{symbol}&apikey=#{api_key}&datatype=csv"
+ # data = []
+ # csv = URI.parse(uri).read
+ # if csv.include?('call frequency')
+ # return :rate_limit_exceeded
+ # end
+ # CSV.parse(csv, headers: true) do |row|
+ # data << row['close'].to_f if row['timestamp'].include?(year.to_s)
+ # end
+ # year_end = data.first
+ # year_end
+ # rescue => e
+ # p e
+ # end
   # end
   #
+ # api_key = ENV['ALPHAVANTAGE_KEY']
+ # abort(error_message) unless api_key
+ #
   # # Future
- # price = Concurrent::Future.execute{ Ticker.new.get_year_end_closing('TWTR', 2013) }
+ # price = Concurrent::Future.execute{ Ticker.new.get_year_end_closing('TWTR', 2013, api_key) }
   # price.state #=> :pending
- # sleep(1) # do other stuff
- # price.value #=> 63.65
- # price.state #=> :fulfilled
+ # price.pending? #=> true
+ # price.value(0) #=> nil (does not block)
   #
- # # ScheduledTask
- # task = Concurrent::ScheduledTask.execute(2){ Ticker.new.get_year_end_closing('INTC', 2013) }
- # task.state #=> :pending
- # sleep(3) # do other stuff
- # task.value #=> 25.96
+ # sleep(1) # do other stuff
+ # #
+ # price.value #=> 63.65 (after blocking if necessary)
+ # price.state #=> :fulfilled
+ # price.fulfilled? #=> true
+ # price.value #=> 63.65
   #
   # @example Successful task execution
   #
data/lib/concurrent-ruby/concurrent/set.rb
@@ -19,13 +19,19 @@ module Concurrent
   #
   # @see http://ruby-doc.org/stdlib-2.4.0/libdoc/set/rdoc/Set.html Ruby standard library `Set`

-
   # @!macro internal_implementation_note
   SetImplementation = case
   when Concurrent.on_cruby?
- # Because MRI never runs code in parallel, the existing
- # non-thread-safe structures should usually work fine.
- ::Set
+ # The CRuby implementation of Set is written in Ruby itself and is
+ # not thread safe for certain methods.
+ require 'monitor'
+ require 'concurrent/thread_safe/util/data_structures'
+
+ class CRubySet < ::Set
+ end
+
+ ThreadSafe::Util.make_synchronized_on_cruby CRubySet
+ CRubySet

   when Concurrent.on_jruby?
   require 'jruby/synchronized'
@@ -33,6 +39,7 @@ module Concurrent
   class JRubySet < ::Set
   include JRuby::Synchronized
   end
+
   JRubySet

   when Concurrent.on_rbx?
@@ -41,7 +48,8 @@ module Concurrent

   class RbxSet < ::Set
   end
- ThreadSafe::Util.make_synchronized_on_rbx Concurrent::RbxSet
+
+ ThreadSafe::Util.make_synchronized_on_rbx RbxSet
   RbxSet

   when Concurrent.on_truffleruby?
@@ -50,7 +58,7 @@ module Concurrent

   class TruffleRubySet < ::Set
   end
- ThreadSafe::Util.make_synchronized_on_truffleruby Concurrent::TruffleRubySet
+ ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubySet
   TruffleRubySet

   else
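On CRuby, Concurrent::Set now wraps every Set operation in a per-object Monitor instead of falling back to the plain ::Set. From the caller's side it remains a drop-in, thread-safe Set; a small sketch:

    require 'concurrent'

    seen = Concurrent::Set.new

    threads = 10.times.map do |i|
      Thread.new do
        100.times { |j| seen.add("item-#{i}-#{j}") }
      end
    end
    threads.each(&:join)

    seen.size                 #=> 1000
    seen.include?('item-0-0') #=> true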
data/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb
@@ -4,9 +4,7 @@ module Concurrent
   # @!visibility private
   # @!macro internal_implementation_note
   LockableObjectImplementation = case
- when Concurrent.on_cruby? && Concurrent.ruby_version(:<=, 1, 9, 3)
- MonitorLockableObject
- when Concurrent.on_cruby? && Concurrent.ruby_version(:>, 1, 9, 3)
+ when Concurrent.on_cruby?
   MutexLockableObject
   when Concurrent.on_jruby?
   JRubyLockableObject
data/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb
@@ -32,6 +32,12 @@ module Concurrent
   @__Condition__ = ::ConditionVariable.new
   end

+ def initialize_copy(other)
+ super
+ @__Lock__ = ::Mutex.new
+ @__Condition__ = ::ConditionVariable.new
+ end
+
   protected

   def synchronize
@@ -61,6 +67,12 @@ module Concurrent
   @__Condition__ = @__Lock__.new_cond
   end

+ def initialize_copy(other)
+ super
+ @__Lock__ = ::Monitor.new
+ @__Condition__ = @__Lock__.new_cond
+ end
+
   protected

   def synchronize # TODO may be a problem with lock.synchronize { lock.wait }
data/lib/concurrent-ruby/concurrent/synchronization/rbx_lockable_object.rb
@@ -12,6 +12,12 @@ module Concurrent
   @__owner__ = nil
   end

+ def initialize_copy(other)
+ super
+ @__Waiters__ = []
+ @__owner__ = nil
+ end
+
   protected

   def synchronize(&block)
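All three lockable-object backends now override initialize_copy so that a dup or clone gets its own lock and condition variable instead of sharing them with the original. A minimal sketch of why that matters; note that LockableObject is an internal (@!visibility private) class and the Counter subclass here is purely hypothetical:

    require 'concurrent'

    class Counter < Concurrent::Synchronization::LockableObject
      def initialize
        super
        @count = 0
      end

      def increment
        synchronize { @count += 1 }
      end
    end

    original = Counter.new
    copy     = original.dup

    # With the initialize_copy overrides above, `copy` holds a fresh Mutex/Monitor,
    # so locking the copy never contends with the original's lock.
    original.increment
    copy.increment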
data/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb
@@ -12,12 +12,37 @@ end
   module Concurrent
   module ThreadSafe
   module Util
+ def self.make_synchronized_on_cruby(klass)
+ klass.class_eval do
+ def initialize(*args, &block)
+ @_monitor = Monitor.new
+ super
+ end
+
+ def initialize_copy(other)
+ # make sure a copy is not sharing a monitor with the original object!
+ @_monitor = Monitor.new
+ super
+ end
+ end
+
+ klass.superclass.instance_methods(false).each do |method|
+ klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1
+ def #{method}(*args)
+ monitor = @_monitor
+ monitor or raise("BUG: Internal monitor was not properly initialized. Please report this to the concurrent-ruby developers.")
+ monitor.synchronize { super }
+ end
+ RUBY
+ end
+ end
+
   def self.make_synchronized_on_rbx(klass)
   klass.class_eval do
   private

   def _mon_initialize
- @_monitor = Monitor.new unless @_monitor # avoid double initialisation
+ @_monitor ||= Monitor.new # avoid double initialisation
   end

   def self.new(*args)
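The new make_synchronized_on_cruby helper gives each instance its own Monitor and wraps every instance method the superclass defines in monitor.synchronize { super }. It is an internal helper, but the pattern it applies can be sketched on any class; UnsafeCounter and SafeCounter below are hypothetical and exist only to mirror what Concurrent::Set does with ::Set on CRuby:

    require 'concurrent'
    require 'monitor'
    require 'concurrent/thread_safe/util/data_structures'

    # A deliberately non-thread-safe class.
    class UnsafeCounter
      def initialize; @n = 0; end
      def bump; @n += 1; end
      def value; @n; end
    end

    # Subclass it, then wrap every method UnsafeCounter defines in the
    # per-object Monitor, exactly as the Set change above does.
    class SafeCounter < UnsafeCounter
    end
    Concurrent::ThreadSafe::Util.make_synchronized_on_cruby SafeCounter

    counter = SafeCounter.new
    10.times.map { Thread.new { 1_000.times { counter.bump } } }.each(&:join)
    counter.value #=> 10000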