concurrently 1.0.1

Files changed (49)
  1. checksums.yaml +7 -0
  2. data/.gitignore +5 -0
  3. data/.rspec +4 -0
  4. data/.travis.yml +16 -0
  5. data/.yardopts +7 -0
  6. data/Gemfile +17 -0
  7. data/LICENSE +176 -0
  8. data/README.md +129 -0
  9. data/RELEASE_NOTES.md +49 -0
  10. data/Rakefile +28 -0
  11. data/concurrently.gemspec +33 -0
  12. data/ext/Ruby/thread.rb +28 -0
  13. data/ext/all/array.rb +24 -0
  14. data/ext/mruby/array.rb +19 -0
  15. data/ext/mruby/fiber.rb +5 -0
  16. data/ext/mruby/io.rb +54 -0
  17. data/guides/Installation.md +46 -0
  18. data/guides/Overview.md +335 -0
  19. data/guides/Performance.md +140 -0
  20. data/guides/Troubleshooting.md +262 -0
  21. data/lib/Ruby/concurrently.rb +12 -0
  22. data/lib/Ruby/concurrently/error.rb +4 -0
  23. data/lib/Ruby/concurrently/event_loop.rb +24 -0
  24. data/lib/Ruby/concurrently/event_loop/io_selector.rb +38 -0
  25. data/lib/all/concurrently/error.rb +10 -0
  26. data/lib/all/concurrently/evaluation.rb +109 -0
  27. data/lib/all/concurrently/evaluation/error.rb +18 -0
  28. data/lib/all/concurrently/event_loop.rb +101 -0
  29. data/lib/all/concurrently/event_loop/fiber.rb +37 -0
  30. data/lib/all/concurrently/event_loop/io_selector.rb +42 -0
  31. data/lib/all/concurrently/event_loop/proc_fiber_pool.rb +18 -0
  32. data/lib/all/concurrently/event_loop/run_queue.rb +111 -0
  33. data/lib/all/concurrently/proc.rb +233 -0
  34. data/lib/all/concurrently/proc/evaluation.rb +246 -0
  35. data/lib/all/concurrently/proc/fiber.rb +67 -0
  36. data/lib/all/concurrently/version.rb +8 -0
  37. data/lib/all/io.rb +248 -0
  38. data/lib/all/kernel.rb +201 -0
  39. data/lib/mruby/concurrently/proc.rb +21 -0
  40. data/lib/mruby/kernel.rb +15 -0
  41. data/mrbgem.rake +42 -0
  42. data/perf/_shared/stage.rb +33 -0
  43. data/perf/concurrent_proc_call.rb +13 -0
  44. data/perf/concurrent_proc_call_and_forget.rb +15 -0
  45. data/perf/concurrent_proc_call_detached.rb +15 -0
  46. data/perf/concurrent_proc_call_nonblock.rb +13 -0
  47. data/perf/concurrent_proc_calls.rb +49 -0
  48. data/perf/concurrent_proc_calls_awaiting.rb +48 -0
  49. metadata +144 -0
data/lib/all/concurrently/error.rb
@@ -0,0 +1,10 @@
+ module Concurrently
+   # @api public
+   # @since 1.0.0
+   #
+   # The general error of this gem.
+   class Error < StandardError; end
+
+   # @private
+   RESCUABLE_ERRORS = [ScriptError, StandardError, SystemStackError]
+ end
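Since `Concurrently::Error` is the root of the gem's error hierarchy (the evaluation errors further down inherit from it), application code can use it as a single rescue target. A minimal sketch, assuming the gem is installed and required as `concurrently`:

```ruby
require 'concurrently'

begin
  # Anything raised by the gem itself derives from Concurrently::Error,
  # so one rescue clause covers scheduling and evaluation failures.
  raise Concurrently::Evaluation::TimeoutError, "not concluded in time"
rescue Concurrently::Error => e
  puts "concurrently failed: #{e.message}"
end
```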
data/lib/all/concurrently/evaluation.rb
@@ -0,0 +1,109 @@
+ module Concurrently
+   # @api public
+   # @since 1.0.0
+   #
+   # `Concurrently::Evaluation` represents the evaluation of the main thread
+   # outside of any concurrent procs.
+   #
+   # @note Evaluations are **not thread safe**. They are operating on a fiber.
+   #   Fibers cannot be resumed inside a thread they were not created in.
+   #
+   # An instance will be returned by {current} if called outside of any
+   # concurrent procs.
+   class Evaluation
+     # The evaluation that is currently running in the current thread.
+     #
+     # This method is thread safe. Each thread returns its own currently running
+     # evaluation.
+     #
+     # @return [Evaluation]
+     #
+     # @example
+     #   concurrent_proc do
+     #     Concurrently::Evaluation.current # => #<Concurrently::Proc::Evaluation:0x00000000e56910>
+     #   end.call_nonblock
+     #
+     #   Concurrently::Evaluation.current # => #<Concurrently::Evaluation:0x00000000e5be10>
+     def self.current
+       EventLoop.current.run_queue.current_evaluation
+     end
+
+     # @private
+     def initialize(fiber)
+       @fiber = fiber
+     end
+
+     # @private
+     #
+     # The fiber the evaluation runs inside.
+     attr_reader :fiber
+
+     # @!attribute [r] waiting?
+     #
+     # Checks if the evaluation is waiting.
+     #
+     # @return [Boolean]
+     def waiting?
+       @waiting
+     end
+
+     # @private
+     DEFAULT_RESUME_OPTS = { deferred_only: true }.freeze
+
+     # @note The exclamation mark in its name stands for: Watch out!
+     #   This method is potentially dangerous and can break stuff. It also
+     #   needs to be complemented by an earlier call of {Kernel#await_resume!}.
+     #
+     # Schedules the evaluation to be resumed.
+     #
+     # It needs to be complemented by an earlier call of {Kernel#await_resume!}.
+     #
+     # This method is potentially dangerous. {Kernel#wait}, {IO#await_readable},
+     # {IO#await_writable} and {Proc::Evaluation#await_result} are implemented
+     # with {Kernel#await_resume!}. Concurrent evaluations waiting because of
+     # them are resumed when calling {#resume!} although the event they are
+     # actually awaiting has not happened yet:
+     #
+     # ```ruby
+     # evaluation = concurrent_proc do
+     #   wait 1
+     #   await_resume!
+     # end.call_nonblock
+     #
+     # evaluation.resume! # resumes the wait call prematurely
+     # ```
+     #
+     # To use this method safely, make sure the evaluation to resume is waiting
+     # because of a manual call of {Kernel#await_resume!}.
+     #
+     # @return [:resumed]
+     # @raise [Error] if the evaluation is not waiting
+     #
+     # @example
+     #   # Control flow is indicated by (N)
+     #
+     #   # (1)
+     #   evaluation = concurrent_proc do
+     #     # (2)
+     #     await_resume!
+     #     # (4)
+     #   end.call_nonblock
+     #
+     #   # (3)
+     #   evaluation.resume! :result
+     #   # (5)
+     #   evaluation.await_result # => :result
+     def resume!(result = nil)
+       run_queue = Concurrently::EventLoop.current.run_queue
+
+       # Cancel running the fiber if it has already been scheduled to run; but
+       # only if it was scheduled with a time offset. This is used to cancel the
+       # timeout of a wait operation if the waiting fiber is resumed before the
+       # timeout is triggered.
+       run_queue.cancel(self, DEFAULT_RESUME_OPTS)
+
+       run_queue.schedule_immediately(self, result)
+       :resumed
+     end
+   end
+ end
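Putting the two halves together, here is a short runnable sketch of the pattern the documentation describes: the concurrent proc parks itself with `#await_resume!` and the returned evaluation is woken from the outside with `#resume!`. It mirrors the `@example` above; `concurrent_proc` and `#call_nonblock` are part of the gem's public API.

```ruby
require 'concurrently'

evaluation = concurrent_proc do
  await_resume!            # suspend this evaluation until resumed externally
end.call_nonblock

evaluation.resume! :done   # schedule the suspended evaluation with a result
evaluation.await_result    # => :done
```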
data/lib/all/concurrently/evaluation/error.rb
@@ -0,0 +1,18 @@
+ module Concurrently
+   class Evaluation
+     # @api public
+     # @since 1.0.0
+     #
+     # A general error for a failed evaluation. It is only used if the error
+     # cannot be attributed to an error in the executed block of code of the
+     # proc itself.
+     class Error < Concurrently::Error; end
+
+     # @api public
+     # @since 1.0.0
+     #
+     # An error indicating an evaluation could not be concluded in a given
+     # time frame.
+     class TimeoutError < Error; end
+   end
+ end
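`TimeoutError` is what the await methods raise when a time limit elapses. A minimal sketch, assuming `#await_result` accepts the `within:` option described in the gem's guides (that keyword is an assumption here, not shown in this diff):

```ruby
require 'concurrently'

# A slow evaluation that needs longer than we are willing to wait.
evaluation = concurrent_proc { wait 1; :late }.call_nonblock

begin
  evaluation.await_result within: 0.1   # assumed keyword, see lead-in
rescue Concurrently::Evaluation::TimeoutError => e
  puts "gave up waiting: #{e.message}"
end
```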
data/lib/all/concurrently/event_loop.rb
@@ -0,0 +1,101 @@
+ module Concurrently
+   # @api public
+   # @since 1.0.0
+   #
+   # @note Although you probably won't need to interact with the event loop
+   #   directly (unless you call `Kernel#fork`, see {#reinitialize!}), you need
+   #   to understand that it's there.
+   #
+   # @note Event loops are **not thread safe**. But since each thread has its
+   #   own event loop they are not shared anyway.
+   #
+   # `Concurrently::EventLoop`, like any event loop, is the heart of your
+   # application and **must never be interrupted, blocked or overloaded.** A
+   # healthy event loop is one that can respond to new events immediately.
+   #
+   # The loop runs in the background and you won't interact with it directly.
+   # Instead, when you call `#wait` or one of the `#await_*` methods the
+   # bookkeeping of selecting IOs for readiness or waiting a given amount of
+   # time is done for you.
+   class EventLoop
+     # The event loop of the current thread.
+     #
+     # This method is thread safe. Each thread returns its own event loop.
+     #
+     # @example
+     #   Concurrently::EventLoop.current
+     def self.current
+       @current ||= new
+     end
+
+     # @private
+     #
+     # A new instance
+     #
+     # An event loop is created for every thread automatically. It should not
+     # be instantiated manually.
+     def initialize
+       reinitialize!
+     end
+
+     # @note The exclamation mark in its name stands for: Watch out!
+     #   This method will break stuff if not used in the right place.
+     #
+     # Resets the inner state of the event loop.
+     #
+     # In detail, calling this method for the event loop:
+     #
+     # * resets its {#lifetime},
+     # * clears its internal run queue,
+     # * clears its internal list of watched IOs,
+     # * clears its internal pool of fibers.
+     #
+     # While this method clears the list of IOs watched for readiness, the IOs
+     # themselves are left untouched. You are responsible for managing IOs (e.g.
+     # closing them).
+     #
+     # @example
+     #   fork do
+     #     Concurrently::EventLoop.current.reinitialize!
+     #     # ...
+     #   end
+     #
+     #   # ...
+     def reinitialize!
+       @start_time = Time.now.to_f
+       @run_queue = RunQueue.new self
+       @io_selector = IOSelector.new self
+       @proc_fiber_pool = ProcFiberPool.new self
+       @fiber = Fiber.new @run_queue, @io_selector, @proc_fiber_pool
+       self
+     end
+
+     # @private
+     #
+     # Its run queue keeping track of and scheduling all concurrent procs
+     attr_reader :run_queue
+
+     # @private
+     #
+     # Its selector to watch IOs.
+     attr_reader :io_selector
+
+     # @private
+     #
+     # Its fiber running the actual loop
+     attr_reader :fiber
+
+     # @private
+     #
+     # Its pool of reusable fibers to run the code of concurrent procs in.
+     attr_reader :proc_fiber_pool
+
+     # The lifetime of this event loop in seconds
+     #
+     # @example
+     #   Concurrently::EventLoop.current.lifetime # => 2.3364
+     def lifetime
+       Time.now.to_f - @start_time
+     end
+   end
+ end
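For the two public touch points of this class, a small sketch: reading the loop's `#lifetime` and resetting the loop in a forked child, as the `@example` for `#reinitialize!` shows. Note that `Kernel#fork` is not available on every platform (e.g. Windows or mruby).

```ruby
require 'concurrently'

# Seconds since this thread's loop was (re)initialized.
puts Concurrently::EventLoop.current.lifetime

fork do
  # The child inherits the parent's loop state and must reset it before
  # doing any concurrent work of its own.
  Concurrently::EventLoop.current.reinitialize!
  concurrent_proc { wait 0.1 }.call
end
Process.wait
```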
data/lib/all/concurrently/event_loop/fiber.rb
@@ -0,0 +1,37 @@
+ module Concurrently
+   # @private
+   class EventLoop::Fiber < ::Fiber
+     def initialize(run_queue, io_selector, proc_fiber_pool)
+       super() do
+         begin
+           while true
+             if (waiting_time = run_queue.waiting_time) == 0
+               # Check ready IOs although fibers are ready to run to not neglect
+               # IO operations. Otherwise, IOs might become jammed since they
+               # are constantly written to but not read from.
+               # This behavior is not covered in the test suite. It becomes
+               # apparent only in situations of heavy load where this event loop
+               # has not much time to breathe.
+               io_selector.process_ready_in waiting_time if io_selector.awaiting?
+
+               run_queue.process_pending
+             elsif io_selector.awaiting? or waiting_time
+               io_selector.process_ready_in waiting_time
+             else
+               # Having no pending timeouts or IO events would make this loop run
+               # forever. But, since we always start the loop from one of the
+               # *await* methods, it also always returns to them after waiting is
+               # complete. Therefore, we never reach this part of the code unless
+               # there is a bug or someone messed around with the internals of this gem.
+               raise Error, "Infinitely running event loop detected: There " <<
+                 "are no concurrent procs or fibers scheduled and no IOs to await."
+             end
+           end
+         rescue Exception => e
+           Concurrently::EventLoop.current.reinitialize!
+           raise Error, "Event loop torn down by #{e.inspect}"
+         end
+       end
+     end
+   end
+ end
data/lib/all/concurrently/event_loop/io_selector.rb
@@ -0,0 +1,42 @@
+ module Concurrently
+   # @private
+   class EventLoop::IOSelector
+     def initialize(event_loop)
+       @run_queue = event_loop.run_queue
+       @readers = {}
+       @writers = {}
+       @evaluations = {}
+     end
+
+     def awaiting?
+       @evaluations.any?
+     end
+
+     def await_reader(io, evaluation)
+       @readers[evaluation] = io
+       @evaluations[io] = evaluation
+     end
+
+     def await_writer(io, evaluation)
+       @writers[evaluation] = io
+       @evaluations[io] = evaluation
+     end
+
+     def cancel_reader(io)
+       @readers.delete @evaluations.delete io
+     end
+
+     def cancel_writer(io)
+       @writers.delete @evaluations.delete io
+     end
+
+     def process_ready_in(waiting_time)
+       waiting_time = nil if waiting_time == Float::INFINITY
+       if selected = IO.select(@readers.values, @writers.values, nil, waiting_time)
+         selected.each do |ios|
+           ios.each{ |io| @run_queue.resume_evaluation! @evaluations[io], true }
+         end
+       end
+     end
+   end
+ end
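The selector's `#process_ready_in` is a thin wrapper around Ruby's core `IO.select`. A standalone sketch of that readiness check, outside the gem, to illustrate what a single pass does (the pipe and variable names are illustrative only):

```ruby
# IO.select blocks for at most the given number of seconds and returns the
# ready IOs grouped as [readables, writables, errored], or nil on timeout.
# The selector resumes the evaluation registered for each ready IO.
reader, writer = IO.pipe
writer.write 'ping'

ready = IO.select([reader], [], nil, 0.1)
p ready.first.include?(reader)  # => true, the pipe has data to read

reader.close
writer.close
```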
data/lib/all/concurrently/event_loop/proc_fiber_pool.rb
@@ -0,0 +1,18 @@
+ module Concurrently
+   # @private
+   # The fiber pool grows dynamically if its internal store of fibers is empty.
+   class EventLoop::ProcFiberPool
+     def initialize(event_loop)
+       @event_loop = event_loop
+       @fibers = []
+     end
+
+     def take_fiber
+       @fibers.pop or Proc::Fiber.new self
+     end
+
+     def return(fiber)
+       @fibers << fiber
+     end
+   end
+ end
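The pool's contract is small enough to show directly. Note the class is `@private`; this sketch only illustrates the take/return cycle the event loop performs internally and is not something application code should normally do:

```ruby
require 'concurrently'

pool = Concurrently::EventLoop.current.proc_fiber_pool
fiber = pool.take_fiber   # reuses a parked fiber or builds a new Proc::Fiber
pool.return fiber         # hands the fiber back for later reuse
```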
data/lib/all/concurrently/event_loop/run_queue.rb
@@ -0,0 +1,111 @@
+ module Concurrently
+   # @private
+   class EventLoop::RunQueue
+     # The items of the run queue are called carts. Carts are simple arrays
+     # with the following layout: [evaluation, time, result]
+     EVALUATION = 0; TIME = 1; RESULT = 2
+
+     # There are two tracks: the fast track and the regular cart track. The
+     # fast track exists for evaluations to be scheduled immediately. Having a
+     # dedicated track lets us just push carts to the track in the order they
+     # appear. This saves us the rather expensive #bisect_left computation of
+     # where on the regular cart track to insert the cart.
+
+     # The additional cart index exists so carts can be cancelled by their
+     # evaluation. Cancelled carts have their evaluation set to false.
+
+     DEFAULT_CANCEL_OPTS = { deferred_only: false }.freeze
+
+     class Track < Array
+       def bisect_left
+         bsearch_index{ |item| yield item } || length
+       end
+     end
+
+     def initialize(loop)
+       @loop = loop
+       @cart_index = {}
+       @deferred_track = Track.new
+       @immediate_track = Track.new
+     end
+
+     def schedule_immediately(evaluation, result = nil)
+       cart = [evaluation, false, result]
+       @cart_index[evaluation.hash] = cart
+       @immediate_track << cart
+     end
+
+     def schedule_deferred(evaluation, seconds, result = nil)
+       cart = [evaluation, @loop.lifetime+seconds, result]
+       @cart_index[evaluation.hash] = cart
+       index = @deferred_track.bisect_left{ |tcart| tcart[TIME] <= cart[TIME] }
+       @deferred_track.insert(index, cart)
+     end
+
+     def cancel(evaluation, opts = DEFAULT_CANCEL_OPTS)
+       if (cart = @cart_index[evaluation.hash]) and (not opts[:deferred_only] or cart[TIME])
+         cart[EVALUATION] = false
+       end
+     end
+
+     def process_pending
+       # Clear the fast track in the beginning so that carts added to it while
+       # processing pending carts will be processed during the next iteration.
+       processing = @immediate_track
+       @immediate_track = []
+
+       if @deferred_track.any?
+         now = @loop.lifetime
+         index = @deferred_track.bisect_left{ |cart| cart[TIME] <= now }
+         @deferred_track.pop(@deferred_track.length-index).reverse_each do |cart|
+           processing << cart
+         end
+       end
+
+       processing.each do |cart|
+         @cart_index.delete cart[EVALUATION].hash
+         resume_evaluation! cart[EVALUATION], cart[RESULT] if cart[EVALUATION]
+       end
+     end
+
+     def waiting_time
+       if @immediate_track.any?
+         0
+       elsif next_cart = @deferred_track.reverse_each.find{ |cart| cart[EVALUATION] }
+         waiting_time = next_cart[TIME] - @loop.lifetime
+         waiting_time < 0 ? 0 : waiting_time
+       end
+     end
+
+     def resume_evaluation!(evaluation, result)
+       previous_evaluation = @current_evaluation
+
+       case evaluation
+       when Proc::Fiber # this will only happen when calling Concurrently::Proc#call_and_forget
+         @current_evaluation = nil
+         evaluation.resume result
+       when Proc::Evaluation
+         @current_evaluation = evaluation
+         evaluation.fiber.resume result
+       else
+         @current_evaluation = nil
+         Fiber.yield result
+       end
+     ensure
+       @current_evaluation = previous_evaluation
+     end
+
+     # only needed in Concurrently::Proc#call_nonblock
+     attr_accessor :current_evaluation
+     attr_writer :evaluation_class
+
+     def current_evaluation
+       @current_evaluation ||= case fiber = Fiber.current
+       when Proc::Fiber
+         (@evaluation_class || Proc::Evaluation).new fiber
+       else
+         Evaluation.new fiber
+       end
+     end
+   end
+ end
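The ordering trick behind `Track#bisect_left` is plain `Array#bsearch_index` in find-minimum mode. A standalone sketch of how the deferred track keeps carts sorted by wake-up time, with the soonest time at the end so it can be popped cheaply; plain floats stand in for carts here:

```ruby
track = [5.0, 3.0, 1.0]   # wake-up times, descending
new_time = 2.0

# First index whose entry is <= the new time, or the end of the array.
index = track.bsearch_index { |time| time <= new_time } || track.length
track.insert(index, new_time)

p track  # => [5.0, 3.0, 2.0, 1.0]; still sorted, soonest wake-up last
```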