dispatch_queue_rb 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/Gemfile +13 -0
- data/LICENSE +22 -0
- data/README.md +98 -0
- data/dispatch_queue_rb.gemspec +43 -0
- data/lib/dispatch_queue_rb.rb +32 -0
- data/lib/dispatch_queue_rb/concurrent_queue.rb +116 -0
- data/lib/dispatch_queue_rb/dispatch.rb +66 -0
- data/lib/dispatch_queue_rb/dispatch_group.rb +72 -0
- data/lib/dispatch_queue_rb/internal/condition_variable_pool.rb +33 -0
- data/lib/dispatch_queue_rb/internal/continuation.rb +41 -0
- data/lib/dispatch_queue_rb/internal/heap.rb +88 -0
- data/lib/dispatch_queue_rb/internal/thread_pool_queue.rb +127 -0
- data/lib/dispatch_queue_rb/internal/thread_queue.rb +62 -0
- data/lib/dispatch_queue_rb/internal/timer_pool.rb +71 -0
- data/lib/dispatch_queue_rb/mixins/dispatch_after_impl.rb +18 -0
- data/lib/dispatch_queue_rb/mixins/dispatch_sync_impl.rb +42 -0
- data/lib/dispatch_queue_rb/serial_queue.rb +77 -0
- data/lib/dispatch_queue_rb/version.rb +12 -0
- data/rakefile.rb +110 -0
- data/test/_test_env.rb +52 -0
- data/test/test_concurrent_queue.rb +90 -0
- data/test/test_condition_variable_pool.rb +41 -0
- data/test/test_continuation.rb +23 -0
- data/test/test_dispatch.rb +91 -0
- data/test/test_dispatch_group.rb +59 -0
- data/test/test_group_concurrent_queue.rb +75 -0
- data/test/test_group_serial_queue.rb +33 -0
- data/test/test_group_thread_pool_queue.rb +34 -0
- data/test/test_heap.rb +58 -0
- data/test/test_serial_queue.rb +77 -0
- data/test/test_thread_pool_queue.rb +63 -0
- data/test/test_thread_queue.rb +77 -0
- data/test/test_timer_pool.rb +124 -0
- data/test/test_version.rb +155 -0
- metadata +181 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
+---
+SHA1:
+  metadata.gz: e177e6f49e44b8c2e8aa0e34ffdb8acf2dc2a555
+  data.tar.gz: ce6d7aed534ffe9ccb29f604ea28358b58c70cbb
+SHA512:
+  metadata.gz: feb77bdb4577eb050c0c0e9544b3bcba7eeda6774638a4bfe1beb7e8dd04b4a30a778310b0bb2d7b085a178d4623d7e38948b88ffe156c157172155d69758cfc
+  data.tar.gz: f2687609268da36638c6aabbdea5dcbda827dc82559701774b550881ee6e3dec2f33f1bbb175e29641e7016ce0f42c52285d30c4b47db7904f667c24b78bd0aa
data/Gemfile
ADDED
@@ -0,0 +1,13 @@
+# =============================================================================
+#
+# MODULE : Gemfile
+# PROJECT : DispatchQueueRb
+# DESCRIPTION :
+#
+# Copyright (c) 2016, Marc-Antoine Argenton. All rights reserved.
+# =============================================================================
+
+
+
+source 'https://rubygems.org'
+gemspec
data/LICENSE
ADDED
@@ -0,0 +1,22 @@
+Copyright (c) 2016, Marc-Antoine Argenton. All rights reserved.
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
data/README.md
ADDED
@@ -0,0 +1,98 @@
+# DispatchQueueRb
+
+[](https://badge.fury.io/rb/dispatch_queue_rb)
+[](https://travis-ci.org/marcus999/dispatch_queue_rb)
+[](https://codeclimate.com/github/marcus999/dispatch_queue_rb)
+[](https://codeclimate.com/github/marcus999/dispatch_queue_rb)
+
+
+
+## Overview
+
+DispatchQueueRb is a pure ruby implementation of the Grand Central Dispatch
+concurrency primitives, using Ruby threads and blocking synchronization primitives
+like Mutex and ConditionVariable.
+
+It implements serial and concurrent queues, with synchronous, asynchronous,
+barrier and delayed dispatch methods. All queue dispatch methods accept an
+optional dispatch group to synchronize on completion of a group of tasks.
+It also provides a thread-pool-based concurrent queue, scaled to the number
+of available CPU cores, which is used by default to schedule the actual work.
+
+Beyond libdispatch's highly optimized lock-free C implementation,
+Grand Central Dispatch is a paradigm shift that expresses concurrent
+programming concepts in terms of tasks that must be serialized with respect to
+each other and tasks that can be performed concurrently.
+
+With plain threads, concurrency is usually defined by a fixed set of threads
+that perform specific tasks and pass data to each other through message queues
+and producer / consumer patterns. The flow of data and the sequence of
+operations are often inflexible, because what each thread does and where it
+writes its results is frozen at design time.
+
+With dispatch queues, concurrency is expressed by scheduling work items to
+serial and concurrent queues. The work that needs to be performed in the
+context of a queue is not frozen in a dedicated thread, but passed as a block
+of code to the dispatch method call. That block of code captures the data it
+needs to execute, knows how to access global immutable data, and defines what
+to do with the result.
+
+
+### Implements:
+- SerialQueue and ConcurrentQueue
+- dispatch_async() and dispatch_sync()
+- dispatch_barrier_async() and dispatch_barrier_sync()
+- dispatch_after()
+- Dispatch.main_queue is a global serial queue attached to a single thread
+  (but not the main thread).
+- Dispatch.default_queue is a global concurrent queue that is implemented as
+  a thread pool, scaled to the number of available CPU cores. It is the
+  default parent_queue for all private queues.
+
+### Key differences and unsupported features:
+- Implemented with Ruby threading primitives (Mutex, ConditionVariable) instead
+  of high-performance lock-free algorithms
+- Aimed at MRI Ruby 2.x, where Ruby code cannot execute concurrently across
+  multiple CPU cores; mostly useful for managing parallel sub-process execution.
+- Dispatch.default_queue does not monitor thread activity and does not spawn
+  more threads to compensate for blocked threads.
+
+### Version 1.0 limitations:
+- Does not implement an equivalent of the dispatch_source primitives.
+- Does not support suspend / resume operations
+- Does not provide multiple priority levels for global queues
+
+
+## Installation
+
+Add this line to your application's Gemfile:
+
+```ruby
+gem 'dispatch_queue_rb'
+```
+
+And then execute:
+
+    $ bundle
+
+Or install it yourself as:
+
+    $ gem install dispatch_queue_rb
+
+## Usage
+
+TODO: Write usage instructions here
+
+## Development
+
+After checking out the repo, run `bin/setup` to install dependencies. Then, run `bin/console` for an interactive prompt that will allow you to experiment.
+
+To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release` to create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org).
+
+## Contributing
+
+1. Fork it ( https://github.com/[my-github-username]/dispatch_queue_rb/fork )
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Add some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create a new Pull Request
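The Usage section above is still a TODO, so here is a minimal, hypothetical sketch of the queue-and-block style the overview describes. It relies only on API that appears elsewhere in this diff (the top-level ConcurrentQueue and DispatchGroup constants, ConcurrentQueue#dispatch_async( group: ), and DispatchGroup#wait); it is an illustration, not the gem's own documentation.

```ruby
require 'dispatch_queue_rb'

queue   = ConcurrentQueue.new   # schedules its work on Dispatch.default_queue
group   = DispatchGroup.new
results = Queue.new             # Ruby's thread-safe queue, used to collect output

# The work is passed as blocks that capture the data they need,
# rather than being wired into dedicated threads at design time.
[1, 2, 3].each do |i|
  queue.dispatch_async( group:group ) { results << i * 10 }
end

group.wait()                    # block the caller until all three tasks complete
p results.size                  # => 3
```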
data/dispatch_queue_rb.gemspec
ADDED
@@ -0,0 +1,43 @@
+# =============================================================================
+#
+# MODULE : dispatch_queue_rb.gemspec
+# PROJECT : DispatchQueueRb
+# DESCRIPTION :
+#
+# Copyright (c) 2016, Marc-Antoine Argenton. All rights reserved.
+# =============================================================================
+
+
+require_relative 'lib/dispatch_queue_rb/version.rb'
+
+Gem::Specification.new do |spec|
+  spec.name = 'dispatch_queue_rb'
+  spec.version = DispatchQueue::VERSION
+  spec.authors = ["Marc-Antoine Argenton"]
+  spec.email = ["maargenton.dev@gmail.com"]
+  spec.summary = "Pure ruby implementation of Grand Central Dispatch concurrency primitives."
+  spec.description = %q{
+    DispatchQueueRb is a pure ruby implementation of Grand Central Dispatch concurrency primitives.
+    It implements serial and concurrent queues, with synchronous, asynchronous,
+    barrier and delayed dispatch methods. All queue dispatch methods accept an
+    optional dispatch group to synchronize on completion of a group of tasks.
+    It also provides a thread pool based concurrent queue, scaled to the number
+    of available cpu cores, and used by default to schedule the actual work.
+  }.gsub( /\s+/, ' ').strip
+  spec.homepage = "https://github.com/marcus999/dispatch_queue_rb"
+
+  spec.files = Dir['[A-Z]*', 'rakefile.rb', '*.gemspec'].reject { |f| f =~ /.lock/ }
+  spec.files += Dir['bin/**', 'lib/**/*.rb', 'test/**/*.rb', 'spec/**/*.rb', 'features/**/*.rb']
+  spec.executables = spec.files.grep( %r{^bin/} ) { |f| File.basename(f) }
+  spec.test_files = spec.files.grep( %r{^(test|spec|features)/} )
+
+  # spec.add_runtime_dependency 'facets', '~> 3.0'
+  # spec.add_runtime_dependency 'mustache', '~> 1.0'
+
+  spec.add_development_dependency 'bundler', '~> 1.7'
+  spec.add_development_dependency 'rake', '~> 10.0'
+  spec.add_development_dependency 'watch', '~> 0.1'
+  spec.add_development_dependency 'rr', '~> 1.1'
+  spec.add_development_dependency 'minitest', '~> 5.3'
+  spec.add_development_dependency 'minitest-reporters', '~> 1.1'
+end
data/lib/dispatch_queue_rb.rb
ADDED
@@ -0,0 +1,32 @@
+# =============================================================================
+#
+# MODULE : lib/dispatch_queue_rb.rb
+# PROJECT : DispatchQueueRb
+# DESCRIPTION :
+#
+# Copyright (c) 2016, Marc-Antoine Argenton. All rights reserved.
+# =============================================================================
+
+require 'set'
+
+require_relative 'dispatch_queue_rb/version.rb'
+
+require_relative 'dispatch_queue_rb/mixins/dispatch_sync_impl.rb'
+require_relative 'dispatch_queue_rb/mixins/dispatch_after_impl.rb'
+
+require_relative 'dispatch_queue_rb/internal/condition_variable_pool.rb'
+require_relative 'dispatch_queue_rb/internal/heap.rb'
+require_relative 'dispatch_queue_rb/internal/continuation.rb'
+require_relative 'dispatch_queue_rb/internal/thread_pool_queue.rb'
+require_relative 'dispatch_queue_rb/internal/timer_pool.rb'
+require_relative 'dispatch_queue_rb/internal/thread_queue.rb'
+
+require_relative 'dispatch_queue_rb/dispatch.rb'
+require_relative 'dispatch_queue_rb/serial_queue.rb'
+require_relative 'dispatch_queue_rb/concurrent_queue.rb'
+require_relative 'dispatch_queue_rb/dispatch_group.rb'
+
+Dispatch = DispatchQueue::Dispatch
+SerialQueue = DispatchQueue::SerialQueue
+ConcurrentQueue = DispatchQueue::ConcurrentQueue
+DispatchGroup = DispatchQueue::DispatchGroup
data/lib/dispatch_queue_rb/concurrent_queue.rb
ADDED
@@ -0,0 +1,116 @@
+# =============================================================================
+#
+# MODULE : lib/dispatch_queue_rb/concurrent_queue.rb
+# PROJECT : DispatchQueue
+# DESCRIPTION :
+#
+# Copyright (c) 2016, Marc-Antoine Argenton. All rights reserved.
+# =============================================================================
+
+module DispatchQueue
+  class ConcurrentQueue
+
+    def initialize( parent_queue: nil )
+      @mutex = Mutex.new
+      @condition = ConditionVariable.new
+      @task_list = []
+      @parent_queue = parent_queue || Dispatch.default_queue
+      @scheduled_count = 0
+      @barrier_count = 0
+    end
+
+    def dispatch_async( group:nil, &task )
+      group.enter() if group
+      continuation = Continuation.new( target_queue:@parent_queue, group:group ) do
+        _run_task( task, false )
+      end
+
+      schedule_immediately = @mutex.synchronize do
+        if ( @barrier_count > 0)
+          @task_list << continuation
+          false
+        else
+          @scheduled_count += 1
+          true
+        end
+      end
+
+      continuation.run() if schedule_immediately
+      self
+    end
+
+    def dispatch_barrier_async( group:nil, &task )
+      group.enter() if group
+      continuation = Continuation.new( target_queue:@parent_queue, group:group, barrier:true ) do
+        _run_task( task, true )
+      end
+
+      barrier_task, tasks = @mutex.synchronize do
+        @barrier_count += 1
+        @task_list << continuation
+        resume_pending = ( @scheduled_count == 0 && @barrier_count == 1)
+        _sync_get_next_batch() if resume_pending
+      end
+
+      _schedule_next_batch( barrier_task, tasks )
+    end
+
+    include DispatchSyncImpl
+    include DispatchAfterImpl
+
+
+    # def _debug_trace_queue_state( prefix = "" )
+    #   puts "%-35s | scheduled: %3d, barrier: %3d, queued: %3d, barrier_head: %-5s" % [
+    #     prefix,
+    #     @scheduled_count,
+    #     @barrier_count,
+    #     @task_list.count,
+    #     !@task_list.empty? && @task_list.first[1],
+    #   ]
+    # end
+
+    private
+    def _run_task( task, barrier )
+      previous_queue = Thread.current[:current_queue]
+      Thread.current[:current_queue] = self
+
+      begin
+        task.call()
+      ensure
+        Thread.current[:current_queue] = previous_queue
+        _task_comleted( barrier )
+      end
+    end
+
+    def _task_comleted( barrier = false )
+      barrier_task, tasks = @mutex.synchronize do
+        resume_pending = barrier || ((@scheduled_count -= 1) == 0)
+        @barrier_count -= 1 if barrier
+        _sync_get_next_batch() if resume_pending
+      end
+
+      _schedule_next_batch( barrier_task, tasks )
+    end
+
+    def _sync_get_next_batch()
+      return nil if @task_list.empty?
+      return @task_list.shift if @task_list.first.barrier
+
+      tasks = []
+      tasks << @task_list.shift while !@task_list.first.barrier
+      @scheduled_count += tasks.count
+      return [nil, tasks]
+    end
+
+    def _schedule_next_batch( barrier_task, tasks )
+      if barrier_task
+        barrier_task.run()
+      elsif tasks
+        tasks.each do |t|
+          t.run()
+        end
+      end
+    end
+
+  end # class ConcurrentQueue
+end # module DispatchQueue
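To illustrate the bookkeeping above (@scheduled_count for in-flight tasks, @barrier_count and @task_list for work held behind a barrier), here is a hedged sketch of the ordering a caller should observe; it exercises only the public dispatch_async / dispatch_barrier_async methods defined in this file.

```ruby
require 'dispatch_queue_rb'

queue = ConcurrentQueue.new
group = DispatchGroup.new

# These two tasks may run concurrently with each other on the thread pool.
queue.dispatch_async( group:group )         { sleep 0.05; print "a" }
queue.dispatch_async( group:group )         { sleep 0.10; print "b" }

# The barrier runs only after "a" and "b" have finished ...
queue.dispatch_barrier_async( group:group ) { print "|" }

# ... and this task is queued in @task_list until the barrier completes.
queue.dispatch_async( group:group )         { print "c" }

group.wait()    # prints "ab|c" (or "ba|c"); the barrier is strictly ordered
puts
```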
data/lib/dispatch_queue_rb/dispatch.rb
ADDED
@@ -0,0 +1,66 @@
+# =============================================================================
+#
+# MODULE : lib/dispatch_queue_rb/dispatch.rb
+# PROJECT : DispatchQueue
+# DESCRIPTION :
+#
+# Copyright (c) 2016, Marc-Antoine Argenton. All rights reserved.
+# =============================================================================
+
+module DispatchQueue
+  module Dispatch
+
+    Result = Struct.new( :value )
+
+    class << self
+      def ncpu()
+        @@ncpu ||= `sysctl -n hw.ncpu`.to_i rescue 1
+      end
+
+      def default_queue
+        @@default_queue
+      end
+
+      def main_queue
+        @@main_queue
+      end
+
+      def synchronize()
+        mutex, condition = ConditionVariablePool.acquire()
+        result = nil
+        result_handler = Proc.new { |r|
+          result = r;
+          mutex.synchronize { condition.signal() }
+        }
+        mutex.synchronize do
+          yield result_handler
+          condition.wait( mutex )
+        end
+        ConditionVariablePool.release( mutex, condition )
+        result
+      end
+
+      def concurrent_map( input_array, target_queue:nil, &task )
+        group = DispatchGroup.new
+        target_queue ||= default_queue
+
+        output_results = input_array.map do |e|
+          result = Result.new
+          target_queue.dispatch_async( group:group ) do
+            result.value = task.call( e )
+          end
+          result
+        end
+
+        group.wait()
+        output_results.map { |result| result.value }
+      end
+
+
+      private
+      @@default_queue = ThreadPoolQueue.new()
+      @@main_queue = ThreadQueue.new()
+
+    end # class << self
+  end # class Dispatch
+end # module DispatchQueue
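Dispatch.concurrent_map above fans each input element out to the target queue (Dispatch.default_queue unless one is given), tracks the whole batch with a DispatchGroup, and collects values through per-element Result structs so that output order matches input order. A small usage sketch, with the caveat that on MRI the GVL limits true parallelism to blocking work such as I/O or sub-processes:

```ruby
require 'dispatch_queue_rb'

# Map over the inputs concurrently on the default thread-pool queue,
# scaled to the number of available CPU cores; results come back in
# input order regardless of completion order.
squares = Dispatch.concurrent_map( [1, 2, 3, 4] ) { |x| x * x }
p squares   # => [1, 4, 9, 16]
```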
data/lib/dispatch_queue_rb/dispatch_group.rb
ADDED
@@ -0,0 +1,72 @@
+# =============================================================================
+#
+# MODULE : lib/dispatch_queue_rb/dispatch_group.rb
+# PROJECT : DispatchQueue
+# DESCRIPTION :
+#
+# Copyright (c) 2016, Marc-Antoine Argenton. All rights reserved.
+# =============================================================================
+
+module DispatchQueue
+  class DispatchGroup
+    def initialize()
+      @mutex = Mutex.new
+      @condition = ConditionVariable.new
+      @count = 0
+      @notify_list = []
+    end
+
+    def enter()
+      @mutex.synchronize { @count += 1 }
+      self
+    end
+
+    def leave()
+      notify_list = @mutex.synchronize do
+        @count -= 1
+        raise "Unbalanced calls to DispatchGroup.enter() / .leave()" if @count < 0
+        if @count == 0
+          @condition.broadcast()
+          _sync_swap_notify_list()
+        end
+      end
+
+      _schedule_notify_list( notify_list ) if notify_list
+      self
+    end
+
+    def notify( target_queue:nil, barrier:false, group:nil, &task )
+      continuation = Continuation.new( target_queue:target_queue,
+                                       barrier:barrier, group:group, &task )
+      @mutex.synchronize do
+        if @count == 0
+          continuation.run( default_target_queue:Dispatch.default_queue )
+        else
+          @notify_list << continuation
+        end
+      end
+      self
+    end
+
+    def wait( timeout:nil )
+      @mutex.synchronize do
+        return true if @count == 0
+        @condition.wait( @mutex, timeout )
+        return @count == 0
+      end
+    end
+
+    private
+    def _sync_swap_notify_list()
+      return nil if @notify_list.empty?
+      notify_list = @notify_list
+      @notify_list = []
+      return notify_list
+    end
+
+    def _schedule_notify_list( notify_list )
+      notify_list.each { |continuation| continuation.run() }
+    end
+
+  end # class DispatchGroup
+end # module DispatchQueue
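DispatchGroup pairs enter / leave counting with two completion mechanisms: a blocking wait and an asynchronous notify whose continuation fires once the count drops back to zero. A hedged sketch of both (exactly where the notify block runs depends on Continuation, defined in internal/continuation.rb and not shown in this section):

```ruby
require 'dispatch_queue_rb'

group = DispatchGroup.new
queue = ConcurrentQueue.new

queue.dispatch_async( group:group ) { sleep 0.05 }   # stand-in for real work
queue.dispatch_async( group:group ) { sleep 0.10 }   # stand-in for real work

group.notify { puts "both tasks finished" }   # fires once the group drains

group.wait( timeout:5 )   # or block the caller directly; true if it drained in time
sleep 0.1                 # give the asynchronously dispatched notify block time to run
```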