concurrent_monitor 0.0.1.ci.release
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/lib/async/monitor.rb +113 -0
- data/lib/concurrent_monitor/barrier.rb +149 -0
- data/lib/concurrent_monitor/condition_variable.rb +26 -0
- data/lib/concurrent_monitor/future.rb +81 -0
- data/lib/concurrent_monitor/queue.rb +70 -0
- data/lib/concurrent_monitor/semaphore.rb +79 -0
- data/lib/concurrent_monitor/task.rb +36 -0
- data/lib/concurrent_monitor/timeout_clock.rb +111 -0
- data/lib/concurrent_monitor/version.rb +6 -0
- data/lib/concurrent_monitor/wait_timeout.rb +24 -0
- data/lib/concurrent_monitor.rb +129 -0
- data/lib/thread/monitor.rb +139 -0
- metadata +52 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
+---
+SHA256:
+  metadata.gz: 0fb40509148c10d59f1faf6655bd3bb5caf6bc2afc17dc7a3e63b7bfae6d04d1
+  data.tar.gz: 071b45c7a8e404b2d8fa6c175355ae8066bdc8693cdff951fce12d07dc2865bd
+SHA512:
+  metadata.gz: 9ec7055d648bb1f10c4f74e28fc0b4d7f2ddbdad9df2851b88ee2f5af2c7cc60cf12ddbd2235e3e45eaadbc78b221bc5a693ec76a44f411866e4b96333c6c025
+  data.tar.gz: 10717aee9fa99a78e800e53bf258e0634e4c7ca81d6f399ebdcecd62701348fb4847c77b14ce340a89076f979c0289eee969348bd3cf7a9d07aae210a4a576f0
data/lib/async/monitor.rb
ADDED
@@ -0,0 +1,113 @@
+# frozen_string_literal: true
+
+require 'async'
+require_relative '../concurrent_monitor'
+
+Fiber.attr_accessor :concurrent_monitor_task
+
+module Async
+  # Implements the ConcurrentMonitor interface using Async::Tasks
+  class Monitor
+    # Common task interface over Async::Task
+    class Task < ConcurrentMonitor::Task
+      def initialize(name = nil, report_on_exception: true, &block)
+        super()
+        Async(annotation: name, finished: report_on_exception ? nil : Async::Condition.new) do |task|
+          @task = task
+          Fiber.current.concurrent_monitor_task = self
+          block.call(self)
+        end
+      end
+
+      def alive? = @task.alive?
+
+      def current? = @task.current?
+
+      def value = @task.wait
+
+      def stop
+        raise Async::Stop if current?
+
+        # defer stop (need to call value before stopped?) and don't stop if we are already stopped
+        # (since this will change the status to :stopped)
+        @task.stop(true) unless @task.finished?
+        self
+      end
+
+      def stopped?
+        @task.stopped?
+      end
+
+      def to_s = @task.to_s
+
+      def inspect = @task.inspect
+    end
+
+    # Functions
+    module Functions
+      # @return Async::Task
+      def async(...)
+        Task.new(...)
+      end
+
+      # Execute a block, wrapped in a Reactor if this fiber is not already in one
+      def sync(name = nil, &)
+        Sync(annotation: name, &)
+      end
+
+      def current_task
+        Fiber.current.concurrent_monitor_task
+      end
+
+      def new_monitor
+        SINGLETON # there is only one
+      end
+
+      # No-op yield. This is only really a critical section as long as the fiber is not yielded
+      def synchronize
+        yield
+      end
+
+      def task_dump(io = $stderr)
+        io.puts "=== Async Task Dump at #{Time.now} ==="
+        if Async::Task.current?
+          reactor = Async::Task.current.reactor
+          reactor.print_hierarchy($stderr)
+        else
+          io.puts 'No current Async task context'
+        end
+      end
+    end
+
+    # Condition with timeout support
+    class ConditionVariable < Async::Condition
+      include ConcurrentMonitor::ConditionVariable
+
+      def wait(timeout = nil)
+        return super() unless timeout
+
+        # return values match MonitorMixin::ConditionVariable
+        Async::Task.current.with_timeout(timeout) do
+          super()
+        rescue Async::TimeoutError
+          nil
+        end
+      end
+
+      # Note signal is broadcast anyway, so all the waiting fibers are resumed.
+      alias broadcast signal
+    end
+
+    private_constant :ConditionVariable
+
+    include Functions
+    extend Functions
+
+    def new_condition
+      ConditionVariable.new
+    end
+
+    # @!visibility private
+    SINGLETON = new
+  end
+end
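For orientation, here is a minimal usage sketch of the Async-backed monitor above (not taken from the gem's documentation): sync enters a reactor, async wraps the work in the Task adapter, new_condition returns the fiber-aware condition variable, and synchronize is a no-op critical section. The 'worker' name and the work inside it are illustrative only.

  require 'async/monitor'

  monitor = Async::Monitor.new_monitor # returns the shared SINGLETON instance
  monitor.sync('example') do
    done = false
    condition = monitor.new_condition

    worker = monitor.async('worker') do
      # ... some fiber-friendly work ...
      monitor.synchronize do
        done = true
        condition.broadcast
      end
    end

    # wait_until comes from ConcurrentMonitor::WaitTimeout via the condition variable
    monitor.synchronize { condition.wait_until(5) { done } }
    worker.wait
  end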
data/lib/concurrent_monitor/barrier.rb
ADDED
@@ -0,0 +1,149 @@
+# frozen_string_literal: true
+
+module ConcurrentMonitor
+  # A Barrier/Waiter for joining and/or enumerating a group of tasks
+  #
+  # * The iterator methods return lazy enumerators so that results can be processed as tasks complete
+  # * Methods ending in `!` ensure remaining tasks are stopped if enumeration finishes early, e.g. when an exception is
+  #   raised.
+  class Barrier
+    include Enumerable
+
+    # Create a Barrier, yield it to the supplied block then wait for tasks to complete, ensuring
+    # remaining tasks are stopped.
+    #
+    # @return [Object] the result of the block
+    # @example just start and wait for tasks
+    #   Barrier.wait!(monitor) do |barrier|
+    #     jobs.each { |j| barrier.async { j.run } }
+    #   end
+    # @example enumerate results
+    #   Barrier.wait!(monitor) do |barrier|
+    #     jobs.each { |j| barrier.async { j.run } }
+    #     barrier.to_a
+    #   end
+    # @example short circuit enumeration with each!
+    #   Barrier.wait!(monitor) do |barrier|
+    #     jobs.each { |j| barrier.async { j.run } }
+    #     # using the each! enumerator ensures the remaining tasks are stopped after the first two results are found
+    #     barrier.each!.first(2)
+    #   end
+    def self.wait!(monitor:, &)
+      new(monitor:).wait!(&)
+    end
+
+    # @param [Mixin] monitor
+    def initialize(monitor:)
+      @monitor = monitor
+      @queue = monitor.new_queue
+      @tasks = Set.new
+    end
+
+    # Start a task within the barrier
+    # @param [:to_s] name
+    # @param [Boolean] report_on_exception
+    # @return [Task]
+    def async(name = nil, report_on_exception: false, &)
+      synchronize { monitor.async(name, report_on_exception:) { |t| run_task(t, &) }.tap { |t| tasks << t } }
+    end
+
+    # Yield each task as it completes
+    # @return [Enumerator::Lazy] if no block is given
+    # @return [void]
+    def each_task
+      return enum_for(:each_task).lazy unless block_given?
+
+      while (t = dequeue)
+        yield t
+      end
+    end
+
+    # {#each_task}, ensuring {#stop}
+    # @return [Enumerator::Lazy] if no block is given
+    # @return [void]
+    def each_task!(&)
+      return enum_for(:each_task!).lazy unless block_given?
+
+      ensure_stop { each_task(&) }
+    end
+
+    # Yield the value of each task as it completes
+    # @return [Enumerator::Lazy] if no block is given
+    # @return [void]
+    def each
+      return enum_for(:each).lazy unless block_given?
+
+      each_task do |t|
+        v = t.value
+        yield v unless t.stopped?
+      end
+    end
+
+    # {#each}, ensuring {#stop}
+    # @return [Enumerator::Lazy] if no block is given
+    # @return [void]
+    def each!(&)
+      return enum_for(:each!).lazy unless block_given?
+
+      ensure_stop { each(&) }
+    end
+
+    # Optionally yield then wait for tasks
+    # @yield[self]
+    # @return [Object] result of yield if block is given
+    # @return [self] if no block is given
+    def wait
+      (block_given? ? yield(self) : self).tap { each(&:itself) }
+    end
+
+    # {#wait}, ensuring {#stop}
+    def wait!(&) = ensure_stop { wait(&) }
+
+    def stop
+      current, stopping = synchronize { tasks.partition(&:current?) }
+      stopping.each(&:stop)
+      current.first&.stop
+      self
+    end
+
+    def empty?
+      synchronize { tasks.empty? && queue.empty? }
+    end
+
+    def size
+      tasks.size
+    end
+
+    def ready
+      queue.size
+    end
+
+    def synchronize(&) = monitor.synchronize(&)
+    def new_condition = monitor.new_condition
+
+    private
+
+    attr_reader :monitor, :queue, :tasks
+
+    def ensure_stop(method = nil)
+      yield if block_given?
+      send(method) if method
+    ensure
+      stop
+    end
+
+    def run_task(task)
+      yield(task)
+    ensure
+      queue.push(task)
+    end
+
+    def dequeue
+      synchronize do
+        return nil if empty?
+
+        queue.dequeue.tap { |t| tasks.delete(t) if t }
+      end
+    end
+  end
+end
data/lib/concurrent_monitor/condition_variable.rb
ADDED
@@ -0,0 +1,26 @@
+# frozen_string_literal: true
+
+require_relative 'wait_timeout'
+
+module ConcurrentMonitor
+  # Concurrency primitive for waiting on signals with optional timeout
+  # @abstract
+  # @see ConcurrentMonitor::new_condition
+  module ConditionVariable
+    include ConcurrentMonitor::WaitTimeout
+
+    # @!method wait(timeout = nil)
+    #   Wait for {#broadcast}
+    #   @param [Numeric] timeout in seconds
+    #   @return [void] always true
+    #   @note must be called within the {ConcurrentMonitor::Mixin#synchronize} method of this condition's monitor.
+    #   @note A wait may return at any time, regardless of broadcast or timeout. See {#wait_until}, {#wait_while}
+
+    # @!method broadcast
+    #   Signal all waiters
+    #   @note must be called within the {ConcurrentMonitor::Mixin#synchronize} method of this condition's monitor.
+
+    # @!parse
+    #   alias signal broadcast
+  end
+end
data/lib/concurrent_monitor/future.rb
ADDED
@@ -0,0 +1,81 @@
+# frozen_string_literal: true
+
+module ConcurrentMonitor
+  # A passive future value
+  class Future
+    def initialize(monitor:, condition: monitor.new_condition)
+      @monitor = monitor
+      @condition = condition
+      @completed = false
+      @value = nil
+      @error = nil
+    end
+
+    # Blocks until the future is completed or the timeout expires
+    # @return [Object] The value with which the future was fulfilled
+    # @raise [StandardError] If the future was rejected
+    def value
+      wait
+      raise @error if @error
+
+      @value
+    end
+
+    def wait(timeout = nil, **wait_opts)
+      synchronize { condition.wait_until(timeout, **wait_opts) { @completed } }
+    end
+
+    # Resolve the future with a block
+    def resolve
+      complete! do
+        @value = yield
+      rescue StandardError => e
+        @error = e
+      end
+    end
+
+    # Fulfills this future with the given value
+    # @param value [Object] The value to fulfill this future with
+    # @return [Boolean] true if the future was fulfilled, false if already completed
+    def fulfill(value)
+      complete! { @value = value }
+    end
+
+    # Rejects this future with the given error
+    # @param error [Exception] The error to reject this future with
+    # @return [Boolean] true if the future was rejected, false if already completed
+    def reject(error)
+      error = error.new('rejected') if error.is_a?(Class)
+      complete! { @error = error }
+    end
+
+    # @return [Boolean] true if this future has been completed
+    def completed?
+      synchronize { @completed }
+    end
+
+    # @return [Boolean] true if this future has not yet been completed
+    def pending?
+      !completed?
+    end
+
+    private
+
+    attr_reader :monitor, :condition
+
+    def complete!
+      synchronize do
+        return false if @completed
+
+        yield
+      ensure
+        @completed = true
+        @condition.broadcast
+      end
+    end
+
+    def synchronize(&)
+      monitor.synchronize(&)
+    end
+  end
+end
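A brief, hypothetical sketch of how the Future above is meant to be used (shown with the Thread-backed monitor only for brevity; the class itself is monitor-agnostic): resolve captures StandardError and turns it into a rejection, wait blocks with an optional timeout, and value re-raises the captured error if there was one.

  require 'thread/monitor'

  monitor = Thread::Monitor.new
  future = ConcurrentMonitor::Future.new(monitor: monitor)

  monitor.async('resolver') do
    future.resolve { 6 * 7 } # exceptions here reject the future instead of escaping the task
  end

  future.wait(1)  # block for up to one second
  future.value    # => 42, or raises the rejection error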
data/lib/concurrent_monitor/queue.rb
ADDED
@@ -0,0 +1,70 @@
+# frozen_string_literal: true
+
+module ConcurrentMonitor
+  # A simple queue with timeout suitable for use with a Monitor
+  class Queue
+    # @param [ConcurrentMonitor] monitor for this queue
+    def initialize(monitor:)
+      @items = []
+      @monitor = monitor
+      @condition = monitor.new_condition
+    end
+
+    def push(*items)
+      with_items(__method__, *items, signal: true)
+    end
+    alias enqueue push
+
+    def <<(item)
+      with_items(__method__, item, signal: true)
+    end
+
+    def unshift(*items)
+      with_items(__method__, *items, signal: true)
+    end
+
+    def pop(timeout = nil, **wait_opts)
+      when_not_empty(__method__, timeout, **wait_opts)
+    end
+
+    def shift(timeout = nil, **wait_opts)
+      when_not_empty(__method__, timeout, **wait_opts)
+    end
+    alias dequeue shift
+
+    # For inspection, not synchronised
+    def items
+      @items.dup
+    end
+
+    # For inspection not synchronized!
+    def size
+      @items.size
+    end
+
+    def respond_to_missing?(method_name, include_private = false)
+      @items.respond_to?(method_name, false) || super
+    end
+
+    # Operates on the underlying Array synchronized by this Queue's monitor
+    # @note Accepts a :signal keyword argument to control whether waiters are signaled
+    def method_missing(...)
+      with_items(...)
+    end
+
+    private
+
+    attr_reader :monitor, :condition
+
+    def with_items(method, *, signal: method.end_with?('!'), &)
+      monitor.synchronize { @items.public_send(method, *, &).tap { condition.broadcast if signal } }
+    end
+
+    def when_not_empty(method, timeout = nil, **wait_opts)
+      monitor.synchronize do
+        condition.wait_while(timeout, **wait_opts) { @items.empty? } if @items.empty?
+        @items.public_send(method)
+      end
+    end
+  end
+end
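As a quick illustration of the timeout behaviour above, a hedged producer/consumer sketch: pop(timeout) waits on the monitor's condition variable and simply returns nil if nothing has arrived when the timeout expires. The Thread-backed monitor is assumed here only for brevity.

  require 'thread/monitor'

  monitor = Thread::Monitor.new
  queue = ConcurrentMonitor::Queue.new(monitor: monitor)

  producer = monitor.async('producer') do
    5.times { |i| queue.push(i) }
  end

  consumed = []
  # pop returns nil once nothing arrives within the timeout, ending the loop
  while (item = queue.pop(0.5))
    consumed << item
  end

  producer.join
  consumed # => [0, 1, 2, 3, 4], assuming all pushes happen within the timeout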
data/lib/concurrent_monitor/semaphore.rb
ADDED
@@ -0,0 +1,79 @@
+# frozen_string_literal: true
+
+module ConcurrentMonitor
+  # Limits the number of concurrently running tasks started with this Semaphore
+  # @example with Barrier, with early exit
+  #   Barrier.wait!(monitor) do |barrier|
+  #     # Start a task to feed 100 tasks into the barrier 5 at a time
+  #     Semaphore.new(barrier, 5).async_tasks do |semaphore|
+  #       100.times { |i| semaphore.async { start_job(i) } }
+  #     end
+  #     # Wait! here for all the tasks, including the feeder, to finish,
+  #     # but if any one of them fails, all currently running tasks AND the feeder will be stopped
+  #   end
+  class Semaphore
+    # Create a semaphore, load it up with tasks, and then wait for those tasks to finish
+    def self.wait(monitor:, limit:, &)
+      new(monitor:, limit:).wait(&)
+    end
+
+    # @param [Mixin|Barrier] monitor the monitor for starting async tasks.
+    def initialize(monitor:, limit:)
+      @monitor = monitor
+      @limit = limit
+      @condition = monitor.new_condition
+      @task_count = 0
+    end
+
+    # Run an async task, to feed other tasks - eg into a barrier
+    #
+    # @return [Task] a task executing block (which is expected to call {#async} to feed tasks)
+    # @note this task counts against the semaphore limit
+    def async_tasks(&block)
+      async do |task|
+        block.call(self)
+        task.stop
+      end
+    end
+
+    # Start a task, blocking until the semaphore can be acquired
+    def async(name = nil, report_on_exception: true, &)
+      synchronize do
+        @condition.wait_while { @task_count >= @limit }
+        @task_count += 1
+      end
+
+      monitor.async(name, report_on_exception:) { |t| run_task(t, &) }
+    end
+
+    # Wait until all tasks are finished (without understanding anything about their results)
+    #
+    # If a block is given it is sent to {#async_tasks} before waiting
+    # @return [self]
+    # @see Barrier
+    def wait(timeout = nil, **wait_opts, &block)
+      async_tasks(&block) if block
+      synchronize { @condition.wait_while(timeout, **wait_opts) { @task_count.positive? } }
+      self
+    end
+
+    def synchronize(&)
+      monitor.synchronize(&)
+    end
+
+    attr_reader :task_count, :limit
+
+    private
+
+    attr_reader :monitor, :condition
+
+    def run_task(task, &block)
+      block.call(task)
+    ensure
+      synchronize do
+        @task_count -= 1
+        @condition.signal
+      end
+    end
+  end
+end
data/lib/concurrent_monitor/task.rb
ADDED
@@ -0,0 +1,36 @@
+# frozen_string_literal: true
+
+require_relative 'wait_timeout'
+
+module ConcurrentMonitor
+  # @abstract
+  # Common interface to underlying tasks
+  class Task
+    # @!method value
+    #   Wait for task to complete and return its value
+    #   @return [Object]
+    #   @raise [StandardError]
+
+    # Wait for task to complete
+    # @return [self]
+    def wait
+      value
+      self
+    end
+
+    alias join wait
+
+    # @!method stop
+    #   @return [self] after stopping
+    #   @raise [Exception] if called from the current task
+
+    # @!method stopped?
+    #   @return [Boolean] true if the task has completed via stop.
+
+    # @!method alive?
+    #   @return [Boolean] true if the task has not reached completion (as seen by the calling task)
+
+    # @!method current?
+    #   @return [Boolean] true if this task is the current thread/fiber
+  end
+end
data/lib/concurrent_monitor/timeout_clock.rb
ADDED
@@ -0,0 +1,111 @@
+# frozen_string_literal: true
+
+module ConcurrentMonitor
+  # The timeout clock tracks monotonic time for the completion of an event
+  #
+  # Inspired by Async::Clock
+  class TimeoutClock
+    # A module for creating and waiting with TimeoutClock
+    module Mixin
+      # @return [Numeric] current monotonic time
+      def now
+        ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      end
+
+      # Create and start a new TimeoutClock
+      def timeout(timeout)
+        return NIL_TIMEOUT_CLOCK unless timeout
+        return timeout if timeout.is_a?(TimeoutClock)
+
+        TimeoutClock.new(timeout).tap(&:start!)
+      end
+
+      # Create a TimeoutClock and wait until block is true. See {#wait_until}
+      # @example
+      #   TimeoutClock.wait_until(60) { closed? || (sleep(1) && false) }
+      def wait_until(timeout = nil, delay: nil, exception: nil, &)
+        timeout(timeout).wait_until(exception:, delay:, &)
+      end
+
+      # Create a TimeoutClock and wait while block is true. See {#wait_while}
+      def wait_while(timeout = nil, delay: nil, exception: nil, &)
+        timeout(timeout).wait_while(exception:, delay:, &)
+      end
+    end
+
+    extend Mixin
+
+    # @param [Numeric] duration the duration in seconds before timeout.
+    def initialize(duration)
+      @timeout = duration
+    end
+
+    # Start the timeout
+    def start!
+      @start = TimeoutClock.now if @timeout
+    end
+
+    # @return [Numeric] remaining duration before timeout
+    def remaining
+      return nil unless @timeout
+
+      @timeout - duration
+    end
+
+    # @return [Boolean] true if the timeout has expired
+    # @yield(remaining)
+    #   if block provided and not yet expired
+    # @yieldparam [Numeric] remaining
+    # @yieldreturn [void]
+    def expired?
+      return true if (rem = remaining)&.negative?
+
+      yield rem if block_given?
+      false
+    end
+
+    # @return [Numeric] time in seconds since {#start!}
+    def duration
+      @start ? TimeoutClock.now - @start : 0
+    end
+
+    # Repeatedly call block, passing remaining duration, until it returns truthy or the timeout is expired
+    # @param [Class] exception to raise on timeout
+    # @param [Numeric] delay duration in seconds to sleep between calls to block
+    # @yield[remaining]
+    # @yieldparam [Numeric] remaining the remaining time in the timeout
+    # @yieldreturn [Boolean] something truthy to indicate the wait is over
+    # @return [Object]
+    #   the truthy value of the block, or false if the timeout was expired and no exception class was provided
+    # @raise [StandardError]
+    #   an instance of the exception class if the timeout expires before the block returns a truthy value
+    def wait_until(exception: nil, delay: nil)
+      loop do
+        next unless expired? do |remaining|
+          if (result = yield remaining)
+            return result
+          end
+
+          sleep([delay, self.remaining].min) if delay
+        end
+
+        raise exception, 'timed out' if exception
+
+        return false
+      end
+    end
+
+    # Wait while block is truthy
+    # @param [StandardError] exception
+    # @yield(remaining)
+    # @yieldparam [Numeric] remaining the remaining time in the timeout
+    # @yieldreturn [Boolean] true if waiting should continue
+    # @return [void]
+    # @raise [StandardError]
+    def wait_while(delay: nil, exception: nil)
+      wait_until(exception:, delay:) { |remaining| !yield remaining }
+    end
+  end
+
+  NIL_TIMEOUT_CLOCK = TimeoutClock.new(nil)
+end
data/lib/concurrent_monitor/wait_timeout.rb
ADDED
@@ -0,0 +1,24 @@
+# frozen_string_literal: true
+
+require_relative 'timeout_clock'
+
+module ConcurrentMonitor
+  # Extends anything with a 'wait(timeout=nil)' method with wait_while and wait_until
+  module WaitTimeout
+    # Repeatedly call wait(timeout) until the condition is true or the timeout duration is expired
+    # @param [TimeoutClock|Integer|nil] timeout
+    #   duration in seconds to wait on the condition before timeout or nil to wait forever
+    # @param [Exception|nil] exception an exception to raise on timeout
+    # @return [Object] the truthy return value of the block
+    # @return [nil] if a timeout occurs
+    def wait_until(timeout = nil, exception: nil)
+      TimeoutClock.wait_until(timeout, exception:) { |remaining| yield || (wait(remaining) && false) }
+    end
+
+    # @return [void] - always falsey
+    # @see wait_until
+    def wait_while(*, **)
+      wait_until(*, **) { !yield }
+    end
+  end
+end
data/lib/concurrent_monitor.rb
ADDED
@@ -0,0 +1,129 @@
+# frozen_string_literal: true
+
+require_relative 'concurrent_monitor/timeout_clock'
+require_relative 'concurrent_monitor/task'
+require_relative 'concurrent_monitor/condition_variable'
+require_relative 'concurrent_monitor/queue'
+require_relative 'concurrent_monitor/barrier'
+require_relative 'concurrent_monitor/future'
+require 'forwardable'
+
+# A unified abstraction layer for synchronization and concurrency primitives that works
+# consistently across both Thread and Fiber-based concurrency models.
+# @example Class usage
+#   require 'concurrent_monitor'
+#   class MyConcurrentResource
+#     include ConcurrentMonitor
+#
+#     def initialize(monitor:)
+#       self.monitor = monitor.new_monitor
+#     end
+#   end
+# @example Async (Fiber-based) usage
+#   require 'async/monitor'
+#   resource = MyConcurrentResource.new(monitor: ConcurrentMonitor.async_monitor)
+# @example Threaded usage
+#   require 'thread/monitor'
+#   resource = MyConcurrentResource.new(monitor: ConcurrentMonitor.thread_monitor)
+module ConcurrentMonitor
+  class << self
+    def async_monitor
+      require 'async/monitor'
+      Async::Monitor
+    end
+
+    def thread_monitor
+      require 'thread/monitor'
+      Thread::Monitor
+    end
+  end
+
+  # Include general wait_until, wait_while
+  include TimeoutClock::Mixin
+
+  extend Forwardable
+
+  # @!attribute [rw] monitor
+  #   @return [Async::Monitor,Thread::Monitor] should be set to an instance of Async::Monitor or Thread::Monitor
+  def_delegators :@monitor, :sync, :async, :current_task, :synchronize, :new_condition, :task_dump
+
+  # @!method new_monitor
+  #   @return [Async::Monitor|Thread::Monitor] a new monitor of the same kind as the current one
+
+  # @!method async(name = nil, report_on_exception: true, &block)
+  #   Run a task asynchronously
+  #   @return [Task]
+
+  # @!method sync(name = nil, &block)
+  #   Run a task immediately, starting a reactor if necessary
+  #   @return [Object] the result of the block
+
+  # @!method current_task
+  #   @return [Task] The current task which can be compared against other tasks
+
+  # @!method task_dump(io=$stderr)
+  #   Dumps a list of running tasks, with backtrace, to the supplied IO
+  #   @return [void]
+
+  # @!method synchronize(&block)
+  #   Execute block as a (potentially re-entrant) critical section within this monitor
+  #
+  #   Synchronisation principles:
+  #   1. Critical sections should be kept brief and focused on manipulating shared state.
+  #   2. Only yield control using monitor based primitives such as ConditionVariable / Queue
+  #   3. Always recheck state when resuming after potentially yielding control
+  #
+  #   @note Async::Monitor#synchronize does not provide true mutual exclusion. However,
+  #     code proven to be thread-safe under Thread::Monitor will also be safe for use with Async::Monitor.
+
+  # @!method new_condition
+  #   @return [ConditionVariable]
+
+  # Run a task with a timeout
+  # @param [Numeric|TimeoutClock] timeout
+  # @param [Class|StandardError|nil] exception if set, the error to raise on timeout
+  # @return [Object] task result, nil if timed out without exception
+  def with_timeout(timeout, exception: nil, condition: new_condition, &block)
+    done = false
+    task = async do |t|
+      block.call(t)
+    ensure
+      synchronize do
+        done = true
+        condition.broadcast
+      end
+    end
+
+    begin
+      synchronize { condition.wait_until(timeout, exception:) { done } }
+    ensure
+      task.stop
+    end
+
+    task.value
+  end
+
+  # @return [Queue] a new queue synchronized on this monitor
+  def new_queue(monitor: self)
+    Queue.new(monitor:)
+  end
+
+  # Creates a new task barrier
+  # @return [Barrier]
+  def new_barrier(monitor: self)
+    Barrier.new(monitor:)
+  end
+
+  # @see Barrier#wait!
+  def with_barrier(monitor: self, &)
+    Barrier.wait!(monitor:, &)
+  end
+
+  def new_future(monitor: self)
+    Future.new(monitor:)
+  end
+
+  # @!attribute [rw] monitor
+  #   @return [Thread::Monitor|Async::Monitor]
+  attr_accessor :monitor
+end
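Building on the @example blocks above, here is a slightly fuller, hypothetical sketch of the mixin in use: with_barrier fans work out and collects values in completion order, and with_timeout returns nil when the block overruns. Downloads, fetch and slow_http_get are illustrative names, not part of the gem.

  require 'concurrent_monitor'

  class Downloads
    include ConcurrentMonitor

    def initialize(monitor:)
      self.monitor = monitor.new_monitor
    end

    def fetch_all(urls)
      with_barrier do |barrier|
        urls.each { |url| barrier.async { fetch(url) } }
        barrier.to_a # values in completion order
      end
    end

    def fetch(url)
      # nil if slow_http_get (a placeholder) takes longer than 5 seconds
      with_timeout(5) { slow_http_get(url) }
    end
  end

  downloads = Downloads.new(monitor: ConcurrentMonitor.thread_monitor)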
data/lib/thread/monitor.rb
ADDED
@@ -0,0 +1,139 @@
+# frozen_string_literal: true
+
+require 'monitor'
+require_relative '../concurrent_monitor'
+
+class Thread
+  # A concurrent Monitor for Threads, based on MonitorMixin
+  # This is the standalone monitor class.
+  # @see Monitor::Mixin
+  class Monitor
+    # rubocop:disable Lint/InheritException
+
+    # Raised when a task is stopped
+    class Stop < Exception; end
+
+    # rubocop:enable Lint/InheritException
+
+    # @!visibility private
+    class ConditionVariable < MonitorMixin::ConditionVariable
+      include ConcurrentMonitor::ConditionVariable
+    end
+
+    # @!visibility private
+    # Common task interface over Thread
+    class Task < ConcurrentMonitor::Task
+      def initialize(name = nil, report_on_exception: true, &block)
+        super()
+        @stopped = nil
+        @thread = Thread.new(self, name, report_on_exception, block) do |t, n, e, b|
+          run_thread(t, n, e, &b)
+        rescue Stop
+          @stopped = true
+          nil
+        ensure
+          @stopped ||= false
+        end
+      end
+
+      def alive? = @thread.alive?
+
+      def current? = @thread == Thread.current
+
+      def value
+        return nil if @stopped
+
+        @thread.value
+      rescue Stop
+        @stopped = true
+        nil
+      end
+
+      def stop
+        raise Stop if current?
+
+        @thread.raise(Stop) if @thread.alive?
+        self
+      rescue ThreadError
+        self # race with alive?
+      end
+
+      # must call join or value before @stopped will be true
+      def stopped?
+        !!@stopped
+      end
+
+      def to_s = @thread.to_s
+
+      def inspect = @thread.inspect
+
+      private
+
+      def run_thread(task, name, report_on_exception)
+        Thread.current.tap do |c|
+          c.report_on_exception = report_on_exception
+          c.name = name.to_s if name
+          c.thread_variable_set(:concurrent_monitor_task, task)
+        end
+        yield task
+      end
+    end
+
+    # @!visibility private
+    # Module functions
+    module Functions
+      def new_monitor
+        Monitor.new
+      end
+
+      # @return [Task]
+      def async(...)
+        start_thread_group
+        Task.new(...)
+      end
+
+      # Just invokes the block
+      def sync(_name = nil, &block)
+        start_thread_group
+        block.call
+      end
+
+      def current_task
+        Thread.current.thread_variable_get(:concurrent_monitor_task)
+      end
+
+      def task_dump(io = $stderr)
+        io.puts "=== Thread Dump at #{Time.now} ==="
+        # Get the current thread group
+        group = Thread.current.group
+
+        if group == ThreadGroup::Default
+          io.puts 'Not in a Thread::Monitor group'
+        else
+          io.puts "Total Threads in #{group}: #{group.list.count}"
+          group.list.each do |thread|
+            io.puts "\n#{thread}"
+            io.puts "→ #{thread.backtrace.join("\n ")}" if thread.backtrace
+          end
+        end
+      end
+
+      private
+
+      def start_thread_group
+        ThreadGroup.new.add(Thread.current) if Thread.current.group == ThreadGroup::Default
+      end
+    end
+
+    def new_condition
+      # We're using the underlying @mon_data variable but giving ourselves
+      # the enhanced condition variable, which just saves one level of wrapping
+      ConditionVariable.new(@mon_data)
+    end
+
+    include MonitorMixin
+
+    include Functions
+    extend Functions
+  end
+end
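A small sketch of the stop semantics implemented by the Thread-backed Task above (assumed behaviour read from the code, not from the gem's docs): stop raises the Stop exception inside the worker thread, and stopped? only reports true once the task has been joined (or value has been called) and the thread has processed the Stop.

  require 'thread/monitor'

  monitor = Thread::Monitor.new
  task = monitor.async('sleeper') { sleep }

  task.stop     # raises Stop inside the sleeping thread
  task.join     # join (or value) before checking stopped?
  task.stopped? # => true
  task.value    # => nil rather than re-raising Stop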
metadata
ADDED
@@ -0,0 +1,52 @@
+--- !ruby/object:Gem::Specification
+name: concurrent_monitor
+version: !ruby/object:Gem::Version
+  version: 0.0.1.ci.release
+platform: ruby
+authors:
+- Grant Gardner
+bindir: bin
+cert_chain: []
+date: 1980-01-02 00:00:00.000000000 Z
+dependencies: []
+description: Simple monitor pattern with a common interface using Thread or Async::Task
+email:
+- grant@lastweekend.com.au
+executables: []
+extensions: []
+extra_rdoc_files: []
+files:
+- lib/async/monitor.rb
+- lib/concurrent_monitor.rb
+- lib/concurrent_monitor/barrier.rb
+- lib/concurrent_monitor/condition_variable.rb
+- lib/concurrent_monitor/future.rb
+- lib/concurrent_monitor/queue.rb
+- lib/concurrent_monitor/semaphore.rb
+- lib/concurrent_monitor/task.rb
+- lib/concurrent_monitor/timeout_clock.rb
+- lib/concurrent_monitor/version.rb
+- lib/concurrent_monitor/wait_timeout.rb
+- lib/thread/monitor.rb
+licenses:
+- MIT
+metadata:
+  rubygems_mfa_required: 'true'
+rdoc_options: []
+require_paths:
+- lib
+required_ruby_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: '3.4'
+required_rubygems_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: '0'
+requirements: []
+rubygems_version: 3.6.9
+specification_version: 4
+summary: An abstract concurrent monitor framework
+test_files: []