dispatch 0.0.1pre

@@ -0,0 +1,19 @@
+ # A GCD-based implementation of the sleeping barber problem:
+ # http://en.wikipedia.org/wiki/Sleeping_barber_problem
+ # http://www.madebysofa.com/#blog/the_sleeping_barber
+
+ waiting_chairs = Dispatch::Queue.new('com.apple.waiting_chairs')
+ semaphore = Dispatch::Semaphore.new(3)
+ index = -1
+ while true
+   index += 1
+   success = semaphore.wait(Dispatch::TIME_NOW)
+   if success != 0
+     puts "Customer turned away #{index}"
+     next
+   end
+   waiting_chairs.async do
+     semaphore.signal
+     puts "Shave and a haircut #{index}"
+   end
+ end
data/lib/dispatch.rb ADDED
@@ -0,0 +1,22 @@
+ #
+ # dispatch.rb - Grand Central Dispatch support library
+ #
+ # Copyright (C) 2010 Apple, Inc.
+ #
+ # == Overview ==
+ #
+ # Grand Central Dispatch (GCD) is a novel approach to multicore computing
+ # first released in Mac OS X version 10.6 Snow Leopard.
+ # The Dispatch module and associated classes (Queue, Group, Semaphore, Source)
+ # in MacRuby core provide a simple wrapper on top of the libdispatch C API.
+ # This library provides higher-level services and convenience methods
+ # to make it easier for traditional Ruby programmers to add multicore support.
+
+ libdir = File.expand_path(File.dirname(__FILE__))
+ $LOAD_PATH.unshift(libdir) unless $LOAD_PATH.include?(libdir)
+
+ require 'dispatch/source'
+ require 'dispatch/queue'
+ require 'dispatch/proxy'
+ require 'dispatch/job'
+ require 'dispatch/enumerable'
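
The following sketch is not part of the gem; it shows roughly how the files required above fit together once the gem is installed, assuming MacRuby 0.5 or later on Mac OS X 10.6 where the Dispatch module and GCD are available.

require 'dispatch'

# Run a block asynchronously and wait for its result (see dispatch/job below)
job = Dispatch::Job.new { Math.sqrt(10**100) }
puts job.value

# Parallel, order-preserving map over any Enumerable (see dispatch/enumerable below)
squares = (1..5).p_map { |n| n * n }
p squares # => [1, 4, 9, 16, 25]
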
@@ -0,0 +1,108 @@
+ # Additional parallel operations for any object supporting +each+
+
+ module Dispatch
+   class Queue
+     def fake_apply(n, &block)
+       g = Dispatch::Group.new
+       n.times do |i|
+         async(g) { block.call(i) }
+       end
+       g.wait
+     end
+   end
+ end
+
+
+ class Integer
+   # Applies the +&block+ +Integer+ number of times in parallel
+   # -- passing in stride (default 1) iterations at a time --
+   # on a concurrent queue of the given (optional) +priority+
+   #
+   #   @sum = 0
+   #   10.p_times(3) { |j| @sum += j }
+   #   p @sum # => 45
+   #
+   def p_times(stride=1, priority=nil, &block)
+     n_times = self.to_int
+     q = Dispatch::Queue.concurrent(priority)
+     return q.apply(n_times, &block) if stride == 1
+
+     n_strides = (n_times / stride).to_int
+     block_from = Proc.new do |j0|
+       lambda { |j| block.call(j0 + j) }
+     end
+     q.fake_apply(n_strides) { |i| stride.times(&block_from.call(i * stride)) }
+     # Runs the remainder (if any) sequentially on the current thread
+     (n_times % stride).times(&block_from.call(n_strides * stride))
+   end
+ end
+
+ module Enumerable
+
+   # Parallel +each+
+   def p_each(stride=1, priority=nil, &block)
+     ary = self.to_a
+     ary.count.p_times(stride, priority) { |i| block.call(ary[i]) }
+   end
+
+   # Parallel +each_with_index+
+   def p_each_with_index(stride=1, priority=nil, &block)
+     ary = self.to_a
+     ary.count.p_times(stride, priority) { |i| block.call(ary[i], i) }
+   end
+
+   # Parallel +collect+
+   # Results match the order of the original array
+   def p_map(stride=1, priority=nil, &block)
+     @p_map_result = Dispatch::Proxy.new([])
+     @p_map_result_q ||= Dispatch::Queue.for(@p_map_result)
+     @p_map_result_q.sync do
+       self.p_each_with_index(stride, priority) { |obj, i| @p_map_result[i] = block.call(obj) }
+     end
+     @p_map_result.__value__
+   end
+
+   # Parallel +collect+ plus +inject+
+   # Accumulates from +initial+ via +op+ (default = :+)
+   # Note: each object can only run one p_mapreduce at a time
+   def p_mapreduce(initial, op=:+, stride=1, priority=nil, &block)
+     # Check first, since exceptions from a Dispatch block can act funky
+     raise ArgumentError unless initial.respond_to? op
+     @mapreduce_q ||= Dispatch::Queue.for(self)
+     @mapreduce_q.sync do # in case called more than once at a time
+       @mapreduce_result = initial
+       q = Dispatch::Queue.for(@mapreduce_result)
+       self.p_each(stride, priority) do |obj|
+         val = block.call(obj)
+         q.async { @mapreduce_result = @mapreduce_result.send(op, val) }
+       end
+       q.sync {}
+       return @mapreduce_result
+     end
+   end
+
+   # Parallel +select+; will return array of objects for which
+   # +&block+ returns true.
+   def p_find_all(stride=1, priority=nil, &block)
+     found_all = Dispatch::Proxy.new([])
+     self.p_each(stride, priority) { |obj| found_all << obj if block.call(obj) }
+     found_all.__value__
+   end
+
+   # Parallel +detect+; will return -one- match for +&block+,
+   # but it may not be the 'first'.
+   # Only useful if the test block is very expensive to run
+   # Note: each object can only run one p_find at a time
+   def p_find(stride=1, priority=nil, &block)
+     @find_q ||= Dispatch::Queue.for(self)
+     @find_q.sync do
+       @find_result = nil
+       q = Dispatch::Queue.for(@find_result)
+       self.p_each(stride, priority) do |obj|
+         q.async { @find_result = obj } if @find_result.nil? and block.call(obj)
+       end
+       q.sync {}
+       return @find_result
+     end
+   end
+ end
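
A quick illustration of the methods defined above, as a hedged sketch that assumes 'dispatch' has already been required; the unordered results are examples, not guaranteed output.

words = %w[apple banana cherry]

lengths = words.p_map { |w| w.length }          # order-preserving => [5, 6, 6]
total   = words.p_mapreduce(0) { |w| w.length } # folds mapped values with :+ => 17
long    = words.p_find_all { |w| w.length > 5 } # unordered, e.g. ["banana", "cherry"]
one     = words.p_find { |w| w.length > 5 }     # any single match, e.g. "banana"
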
@@ -0,0 +1,52 @@
+ module Dispatch
+
+   # Track completion and return values of asynchronous requests
+   # Duck-types +join+ and +value+ from +Thread+
+   class Job
+     attr_reader :group, :values
+
+     # Create a Job that asynchronously dispatches the block
+     def initialize(queue = Dispatch::Queue.concurrent, &block)
+       @queue = queue
+       @group = Group.new
+       @values = synchronize([])
+       add(&block) unless block.nil?
+     end
+
+     def synchronize(obj)
+       Dispatch::Proxy.new(obj, @group)
+     end
+
+     # Submit block as part of the same dispatch group
+     def add(&block)
+       @queue.async(@group) { @values << block.call }
+     end
+
+     # Wait until execution has completed.
+     # If a +block+ is passed, invoke that asynchronously
+     # on the specified +queue+ (or else the default queue).
+     def join(queue = Dispatch::Queue.concurrent, &block)
+       return group.wait if block.nil?
+       group.notify(queue) { block.call }
+     end
+
+     # Wait, then return the most recent value;
+     # note: values are only ordered if the Job's queue is serial.
+     # If a +block+ is passed, invoke that asynchronously with the value
+     # on the specified +queue+ (or else the default queue).
+     def value(queue = Dispatch::Queue.concurrent, &block)
+       return group.notify(queue) { block.call(result) } unless block.nil?
+       group.wait
+       return result
+     end
+
+     alias_method :sync, :synchronize
+
+     private
+
+     # Return the most recently added value
+     def result
+       @values[-1]
+     end
+
+   end
+ end
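
A brief usage sketch of Dispatch::Job as defined above; the square-root calls are placeholder workloads, and the gem must already be loaded.

job = Dispatch::Job.new { Math.sqrt(2) }  # dispatched immediately
job.add { Math.sqrt(3) }                  # same group, runs concurrently
job.join                                  # block until both blocks finish
p job.values.__value__                    # results, in completion order
job.value { |v| puts "latest: #{v}" }     # or be notified asynchronously
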
@@ -0,0 +1,49 @@
+ require 'delegate'
+
+ module Dispatch
+   # Serialize or asynchronize access to a delegate object.
+   # Forwards method invocations to the passed object via a private serial queue,
+   # and can call back asynchronously if given a block
+   #
+   class Proxy < SimpleDelegator
+
+     attr_accessor :__group__, :__queue__, :__sync__
+
+     # Create Proxy to wrap the given +delegate+,
+     # optionally specify +group+ and +queue+ for asynchronous callbacks
+     def initialize(delegate, group=Group.new, queue=Dispatch::Queue.concurrent)
+       super(delegate)
+       @__serial__ = Dispatch::Queue.for(self)
+       @__group__ = group
+       @__queue__ = queue
+       @__retval__ = nil
+     end
+
+     # Call methods on the +delegate+ object via a private serial queue
+     # Returns asynchronously if given a block; else synchronously
+     #
+     def method_missing(symbol, *args, &block)
+       if block.nil?
+         @__serial__.sync { @__retval__ = __getobj__.__send__(symbol, *args) }
+         return @__retval__
+       end
+       queue = @__queue__ # copy in case it changes while in flight
+       @__serial__.async(@__group__) do
+         retval = __getobj__.__send__(symbol, *args)
+         queue.async(@__group__) { block.call(retval) }
+       end
+     end
+
+     # Wait until the internal private queue has completed pending executions
+     def __wait__
+       @__serial__.sync { }
+     end
+
+     # Return the +delegate+ object after waiting
+     def __value__
+       __wait__
+       __getobj__
+     end
+
+   end
+ end
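
A sketch of how Dispatch::Proxy above might guard a shared Array; the callback runs on the concurrent queue, so its timing relative to the final print is illustrative only.

list = Dispatch::Proxy.new([])
list << :a                          # no block: forwarded synchronously
p list.size                         # => 1
list.push(:b) { |ary| p ary.size }  # block given: runs async, result passed to callback
list.__wait__                       # drain the private serial queue
p list.__value__                    # => [:a, :b]
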
@@ -0,0 +1,34 @@
+ # Adds convenience methods to Queues
+
+ module Dispatch
+   class Queue
+
+     # Returns a mostly unique reverse-DNS-style label based on
+     # the ancestor chain and ID of +obj+ plus the current time
+     #
+     #   Dispatch::Queue.labelize(Array.new)
+     #   => enumerable.array.0x2000cc2c0.1265915278.97557
+     #
+     def self.labelize(obj)
+       names = obj.class.ancestors[0...-2].map { |a| a.to_s.downcase }
+       label = names.uniq.reverse.join(".")
+       "#{label}.0x%x.#{Time.now.to_f}" % obj.object_id
+     end
+
+     # Returns a new serial queue with a unique label based on +obj+
+     # Typically used to serialize access to that object
+     #
+     #   a = Array.new
+     #   q = Dispatch::Queue.for(a)
+     #   q.async { a << 2 }
+     #
+     def self.for(obj)
+       new(labelize(obj))
+     end
+
+     # Wait until pending blocks have completed
+     def join
+       sync {}
+     end
+   end
+ end
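
Building on the examples in the comments above, a small sketch of Queue.for combined with join to serialize access to a plain Array.

a = []
q = Dispatch::Queue.for(a)           # serial queue labeled after the Array
10.times { |i| q.async { a << i } }  # mutate only from the serial queue
q.join                               # wait for all pending blocks to run
p a.size                             # => 10
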
@@ -0,0 +1,83 @@
+ # Adds convenience constructors to Dispatch::Source
+
+ module Dispatch
+   class Source
+
+     @@events = {
+       exit:   PROC_EXIT,
+       fork:   PROC_FORK,
+       exec:   PROC_EXEC,
+       signal: PROC_SIGNAL,
+
+       delete: VNODE_DELETE,
+       write:  VNODE_WRITE,
+       extend: VNODE_EXTEND,
+       attrib: VNODE_ATTRIB,
+       link:   VNODE_LINK,
+       rename: VNODE_RENAME,
+       revoke: VNODE_REVOKE
+     }
+
+     class << self
+
+       def event2num(e)
+         return 0 if e.nil?
+         value = e.to_int rescue @@events[e.to_sym]
+         raise ArgumentError, "No event type #{e.inspect}" if value.nil?
+         value
+       end
+
+       def events2mask(events)
+         events.collect { |e| event2num(e) }.reduce(:|)
+       end
+
+       def data2events(bitmask)
+         @@events.collect { |k, v| k if (v & bitmask) > 0 }.compact
+       end
+
+       # Returns Dispatch::Source of type DATA_ADD
+       def add(queue = Dispatch::Queue.concurrent, &block)
+         Dispatch::Source.new(Dispatch::Source::DATA_ADD, 0, 0, queue, &block)
+       end
+
+       # Returns Dispatch::Source of type DATA_OR
+       def or(queue = Dispatch::Queue.concurrent, &block)
+         Dispatch::Source.new(Dispatch::Source::DATA_OR, 0, 0, queue, &block)
+       end
+
+       # Takes events: :exit, :fork, :exec, :signal
+       # Returns Dispatch::Source of type PROC
+       def process(pid, events, queue = Dispatch::Queue.concurrent, &block)
+         events = events2mask(events) unless events.respond_to? :to_int
+         Dispatch::Source.new(Dispatch::Source::PROC, pid, events, queue, &block)
+       end
+
+       # Returns Dispatch::Source of type SIGNAL
+       def signal(signal, queue = Dispatch::Queue.concurrent, &block)
+         signal = Signal.list[signal.to_s] if signal.to_i == 0
+         Dispatch::Source.new(Dispatch::Source::SIGNAL, signal, 0, queue, &block)
+       end
+
+       # Returns Dispatch::Source of type READ
+       def read(file, queue = Dispatch::Queue.concurrent, &block)
+         Dispatch::Source.new(Dispatch::Source::READ, file, 0, queue, &block)
+       end
+
+       # Returns Dispatch::Source of type WRITE
+       def write(file, queue = Dispatch::Queue.concurrent, &block)
+         Dispatch::Source.new(Dispatch::Source::WRITE, file, 0, queue, &block)
+       end
+
+       # Takes events: :delete, :write, :extend, :attrib, :link, :rename, :revoke
+       # Returns Dispatch::Source of type VNODE
+       def file(file, events, queue = Dispatch::Queue.concurrent, &block)
+         events = events2mask(events) unless events.respond_to? :to_int
+         Dispatch::Source.new(Dispatch::Source::VNODE, file, events, queue, &block)
+       end
+
+       # Returns a periodic timer Source that fires every +seconds+
+       def periodic(seconds, queue = Dispatch::Queue.concurrent, &block)
+         Dispatch::Source.timer(0, seconds, 0, queue, &block)
+       end
+     end
+   end
+ end
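
A sketch of the Source constructors above. It assumes MacRuby's Dispatch::Source#<< for merging data into DATA_ADD sources, and a long-lived process so the handlers have time to fire; the specific outputs depend on runtime events.

adder = Dispatch::Source.add { |s| puts "added #{s.data}" }
adder << 1                                   # merge data; the handler sees the accumulated sum

timer = Dispatch::Source.periodic(0.5) { |s| puts "tick" }

watcher = Dispatch::Source.process($$, [:exit, :fork, :exec]) do |s|
  p Dispatch::Source.data2events(s.data)     # which process events fired
end
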
@@ -0,0 +1,3 @@
+ module Dispatch
+   VERSION = "0.0.1pre"
+ end
@@ -0,0 +1,188 @@
+ require "spec_helper"
+
+ if MACOSX_VERSION >= 10.6
+   describe "parallel loop" do
+
+     describe :Integer do
+       describe :p_times do
+         before :each do
+           @count = 4
+           @ary = Array.new
+           @p_ary = Dispatch::Proxy.new([])
+         end
+
+         it "runs the block that many times" do
+           @count.times { |j| @ary << 1 }
+           @count.p_times { |j| @p_ary << 1 }
+           @p_ary.size.should == @ary.size
+         end
+
+         it "runs the block passing the current index" do
+           @count.times { |j| @ary << j }
+           @count.p_times { |j| @p_ary << j }
+           @p_ary.sort.should == @ary
+         end
+
+         it "does not run the block if the count is zero" do
+           0.p_times { |j| @ary << 1 }
+           @ary.size.should == 0
+         end
+
+         it "properly combines blocks with even stride > 1" do
+           @count.times { |j| @ary << j }
+           @count.p_times(2) { |j| @p_ary << j }
+           @p_ary.sort.should == @ary
+         end
+
+         it "properly combines blocks with uneven stride" do
+           @count.times { |j| @ary << j }
+           @count.p_times(3) { |j| @p_ary << j }
+           @p_ary.sort.should == @ary
+         end
+
+         it "properly rounds stride fraction of 0.5" do
+           6.times { |j| @ary << j }
+           6.p_times(4) { |j| @p_ary << j }
+           @p_ary.sort.should == @ary
+         end
+
+         it "properly rounds stride fraction > 0.5" do
+           7.times { |j| @ary << j }
+           7.p_times(4) { |j| @p_ary << j }
+           @p_ary.sort.should == @ary
+         end
+       end
+     end
+
+     describe "Enumerable" do
+       before :each do
+         @rng = (1..3)
+         @ary = @rng.to_a
+       end
+
+       describe :p_each do
+         it "exists on objects that support Enumerable" do
+           @ary.respond_to?(:p_each).should == true
+           @rng.respond_to?(:p_each).should == true
+         end
+
+         it "should behave like each" do
+           @sum1 = 0
+           @ary.each { |v| @sum1 += v*v }
+           @sum2 = 0
+           @q = Dispatch::Queue.for(@sum2)
+           @ary.p_each { |v| temp = v*v; @q.sync { @sum2 += temp } }
+           @sum2.should == @sum1
+         end
+
+         it "should work with ranges" do
+           @sum1 = 0
+           @ary.each { |v| @sum1 += v*v }
+           @sum2 = 0
+           @q = Dispatch::Queue.for(@sum2)
+           @rng.p_each { |v| temp = v*v; @q.sync { @sum2 += temp } }
+           @sum2.should == @sum1
+         end
+
+         it "should execute concurrently" do
+           t0 = Time.now
+           @ary.p_each { |v| sleep v/100.0 }
+           t1 = Time.now
+           t_total = @ary.inject(0) { |a, b| a + b/100.0 }
+           (t1-t0).to_f.should < t_total
+         end
+       end
+
+       describe :p_each_with_index do
+         it "exists on objects that support Enumerable" do
+           @ary.respond_to?(:p_each_with_index).should == true
+         end
+
+         it "should behave like each_with_index" do
+           @sum1 = 0
+           @ary.each_with_index { |v, i| @sum1 += v**i }
+           @sum2 = 0
+           @q = Dispatch::Queue.for(@sum2)
+           @ary.p_each_with_index { |v, i| temp = v**i; @q.sync { @sum2 += temp } }
+           @sum2.should == @sum1
+         end
+       end
+
+       describe :p_map do
+         it "exists on objects that support Enumerable" do
+           @ary.respond_to?(:p_map).should == true
+         end
+
+         it "should behave like map" do
+           map1 = @ary.map { |v| v*v }
+           map2 = @ary.p_map { |v| v*v }
+           map2.should == map1
+         end
+
+         it "should stride safely" do
+           map1 = @ary.map { |v| v*v }
+           map2 = @ary.p_map(2) { |v| v*v }
+           map2.should == map1
+         end
+       end
+
+       describe :p_mapreduce do
+         it "exists on objects that support Enumerable" do
+           @ary.respond_to?(:p_mapreduce).should == true
+         end
+
+         it "should behave like an unordered map" do
+           map1 = @ary.map { |v| v*v }
+           map2 = @ary.p_mapreduce([]) { |v| [v*v] }
+           map2.sort.should == map1
+         end
+
+         it "should accumulate any object that takes :+" do
+           map1 = @ary.map { |v| "%x" % (10+v) }
+           map2 = @ary.p_mapreduce("") { |v| "%x" % (10+v) }
+           map1.each do |s|
+             map2.index(s).should_not == nil
+           end
+         end
+
+         it "should allow custom accumulator methods" do
+           map1 = @ary.map { |v| v**2 }
+           sum1 = map1.inject(0) { |s, v| s | v }
+           sum2 = @ary.p_mapreduce(0, :|) { |v| v**2 }
+           sum2.should == sum1
+         end
+       end
+
+       describe :p_find_all do
+         it "exists on objects that support Enumerable" do
+           @ary.respond_to?(:p_find_all).should == true
+         end
+
+         it "should behave like find_all" do
+           found1 = @ary.find_all { |v| v.odd? }
+           found2 = @ary.p_find_all { |v| v.odd? }
+           found2.sort.should == found1
+         end
+       end
+
+       describe :p_find do
+         it "exists on objects that support Enumerable" do
+           @ary.respond_to?(:p_find).should == true
+         end
+
+         it "returns nil if nothing found" do
+           found2 = @ary.p_find { |v| false }
+           found2.should be_nil
+         end
+
+         it "returns one element that matches the condition" do
+           found1 = @ary.find_all { |v| v.odd? }
+           found2 = @ary.p_find { |v| v.odd? }
+           found2.should_not be_nil
+           found1.should include(found2)
+         end
+       end
+     end
+
+   end
+ end