dispatch 0.0.1pre

data/Rakefile ADDED
@@ -0,0 +1,2 @@
+ require 'bundler'
+ Bundler::GemHelper.install_tasks
data/dispatch.gemspec ADDED
@@ -0,0 +1,21 @@
+ # -*- encoding: utf-8 -*-
+ $:.push File.expand_path("../lib", __FILE__)
+ require "dispatch/version"
+
+ Gem::Specification.new do |s|
+   s.name        = "dispatch"
+   s.version     = Dispatch::VERSION
+   s.platform    = Gem::Platform::RUBY
+   s.authors     = ["Arthur Gunn"]
+   s.email       = ["arthur@gunn.co.nz"]
+   s.homepage    = "https://github.com/gunn/dispatch"
+   s.summary     = %q{Dispatch is a MacRuby wrapper around Mac OS X's Grand Central Dispatch.}
+   s.description = %q{Grand Central Dispatch is natively implemented as a C API and runtime engine. This gem provides a MacRuby wrapper around that API, letting Ruby code easily take advantage of GCD: tasks run in parallel, calculations happen asynchronously, and queues are automatically mapped to threads as needed.}
+
+   s.rubyforge_project = "dispatch"
+
+   s.files         = `git ls-files`.split("\n")
+   s.test_files    = `git ls-files -- {test,spec,features}/*`.split("\n")
+   s.executables   = `git ls-files -- bin/*`.split("\n").map { |f| File.basename(f) }
+   s.require_paths = ["lib"]
+ end
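As a quick orientation for the description above, here is a minimal usage sketch assembled only from APIs that appear in the example scripts further down this listing (Dispatch::Job, Numeric#p_times); it is illustrative and not part of the gem's files:

    require 'dispatch'

    # Run a computation on a background GCD queue and block until its result is ready.
    job = Dispatch::Job.new { Math.sqrt(10**100) }
    puts job.value            # => 1.0e+50

    # Parallel iteration: the block is scheduled across GCD's concurrent queue.
    5.p_times { |i| puts "task #{i}" }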
@@ -0,0 +1,90 @@
+ #!/usr/local/bin/macruby
+ #
+ # Iterate using different GCD techniques to illustrate relative performance/overhead
+ #
+ # Inspired by: https://developer.apple.com/mac/library/samplecode/Dispatch_Compared/index.html
+
+ require 'dispatch'
+ require 'benchmark'
+
+ $max_tasks = 256
+ $reps = 1024
+ $folds = 8
+ $results = nil # set to [] to collect per-task results
+
+ $group = Dispatch::Group.new
+ $queue = Dispatch::Queue.new('org.macruby.gcd.serial')
+
+ module Benchmark # Benchmark is a module in the standard library, so reopen it as one
+   def self.repeat(count, label="", &block)
+     raise "count: #{count} < 1" if count < 1
+     block.call # warm up once before measuring
+     t = measure { count.times(&block) } / count
+     Tms.new(*t.to_a[1..-1], label)
+   end
+ end
+
+ def work_function(i)
+   x = 1.0 + i*i
+   $folds.times { |j| x = Math::tan(Math::PI/2 - Math::atan(Math::exp(2*Math::log(Math::sqrt(x))))) }
+   $results[i] = x if not $results.nil?
+ end
+
+ def times(n)
+   n.times { |i| work_function(i) }
+ end
+
+ def ptimes(n)
+   n.p_times { |i| work_function(i) }
+ end
+
+ def apply(n)
+   Dispatch::Queue.concurrent.apply(n) { |i| work_function(i) }
+ end
+
+ def concur(n)
+   q = Dispatch::Queue.concurrent
+   n.times do |i|
+     q.async($group) { work_function(i) }
+   end
+   $group.wait
+ end
+
+ def serial(n)
+   n.times { |i| $queue.async { work_function(i) } }
+   $queue.sync { }
+ end
+
+ def nqueue(n)
+   n.times do |i|
+     Dispatch::Queue.new("org.macruby.gcd.multi.#{i}").async($group) { work_function(i) }
+   end
+   $group.wait
+ end
+
+ def njobs(n)
+   j = Dispatch::Job.new
+   n.times { |i| j.add { work_function(i) } }
+   j.join
+ end
+
+ def bench(method, count=1)
+   proc = Proc.new { send(method.to_sym, count) }
+   Benchmark.repeat($reps, "%6s" % method, &proc).real * 1e6 / count
+ end
+
+ METHODS = %w(times ptimes apply concur serial nqueue njobs)
+ TASKS = [t = 1]
+ TASKS << t *= 2 while t < $max_tasks
+
+ print "GCD BENCHMARKS\tMaxTask\t#{$max_tasks}\tFolds\t#{$folds}\tReps\t#{$reps}\n"
+ print "T µsec\t #{TASKS.join("\t ")}"
+
+ METHODS.each do |method|
+   print "\n#{method}"
+   TASKS.each do |n|
+     print "\t%6.2f" % bench(method, n)
+   end
+ end
+ puts
+ print "Results: #{$results.join("\t")}" if not $results.nil?
@@ -0,0 +1,277 @@
+ #!/usr/local/bin/macruby
+
+ require 'dispatch'
+ job = Dispatch::Job.new { Math.sqrt(10**100) }
+ @result = job.value
+ puts "value (sync): #{@result} => 1.0e+50"
+
+ job.value {|v| puts "value (async): #{v} => 1.0e+50" } # (eventually)
+ job.join
+ puts "join done (sync)"
+
+ job.join { puts "join done (async)" }
+ job.add { Math.sqrt(2**64) }
+ job.value {|b| puts "value (async): #{b} => 4294967296.0" }
+ @values = job.values
+ puts "values: #{@values.inspect} => [1.0E50]"
+ job.join
+ puts "values: #{@values.inspect} => [1.0E50, 4294967296.0]"
+ job = Dispatch::Job.new {}
+ @hash = job.synchronize Hash.new
+ puts "synchronize: #{@hash.class} => Dispatch::Proxy"
+
+ puts "values: #{job.values.class} => Dispatch::Proxy"
+
+ @hash[:foo] = :bar
+ puts "proxy: #{@hash} => {:foo=>:bar}"
+ @hash.delete :foo
+
+ [64, 100].each do |n|
+   job.add { @hash[n] = Math.sqrt(10**n) }
+ end
+ job.join
+ puts "proxy: #{@hash} => {64 => 1.0E32, 100 => 1.0E50}"
+
+ @hash.inspect { |s| puts "inspect: #{s} => {64 => 1.0E32, 100 => 1.0E50}" }
+ delegate = @hash.__value__
+ puts "\n__value__: #{delegate.class} => Hash"
+
+ n = 42
+ job = Dispatch::Job.new { puts "n (during): #{n} => 42" }
+ job.join
+
+ n = 0
+ job = Dispatch::Job.new { n = 21 }
+ job.join
+ puts "n (after): #{n} => 0?!?"
+ n = 0
+ job = Dispatch::Job.new { n += 84 }
+ job.join
+ puts "n (+=): #{n} => 0?!?"
+ 5.times { |i| print "#{10**i}\t" }
+ puts "times"
+
+ 5.p_times { |i| print "#{10**i}\t" }
+ puts "p_times"
+
+ 5.p_times(3) { |i| print "#{10**i}\t" }
+ puts "p_times(3)"
+ DAYS = %w(Mon Tue Wed Thu Fri)
+ DAYS.each { |day| print "#{day}\t" }
+ puts "each"
+ DAYS.p_each { |day| print "#{day}\t" }
+ puts "p_each"
+ DAYS.p_each(3) { |day| print "#{day}\t" }
+ puts "p_each(3)"
+ DAYS.each_with_index { |day, i| print "#{i}:#{day}\t" }
+ puts "each_with_index"
+ DAYS.p_each_with_index { |day, i| print "#{i}:#{day}\t" }
+ puts "p_each_with_index"
+ DAYS.p_each_with_index(3) { |day, i| print "#{i}:#{day}\t" }
+ puts "p_each_with_index(3)"
+ print (0..4).map { |i| "#{10**i}\t" }.join
+ puts "map"
+
+ print (0..4).p_map { |i| "#{10**i}\t" }.join
+ puts "p_map"
+ print (0..4).p_map(3) { |i| "#{10**i}\t" }.join
+ puts "p_map(3)"
+ mr = (0..4).p_mapreduce(0) { |i| 10**i }
+ puts "p_mapreduce: #{mr} => 11111"
+ mr = (0..4).p_mapreduce([], :concat) { |i| [10**i] }
+ puts "p_mapreduce(:concat): #{mr} => [1, 1000, 10, 100, 10000]"
+
+ mr = (0..4).p_mapreduce([], :concat, 3) { |i| [10**i] }
+ puts "p_mapreduce(3): #{mr} => [1000, 10000, 1, 10, 100]"
+ puts "find_all | p_find_all | p_find_all(3)"
+ puts (0..4).find_all { |i| i.odd? }.inspect
+ puts (0..4).p_find_all { |i| i.odd? }.inspect
+ puts (0..4).p_find_all(3) { |i| i.odd? }.inspect
+
+ puts "find | p_find | p_find(3)"
+ puts (0..4).find { |i| i == 5 }.nil? # => true, since find returns nil
+ puts (0..4).p_find { |i| i == 5 }.nil? # => true, since p_find returns nil
+ puts (0..4).p_find(3) { |i| i == 5 }.nil? # => true, since p_find returns nil
+ puts "#{(0..4).find { |i| i.odd? }} => 1"
+ puts "#{(0..4).p_find { |i| i.odd? }} => 1?"
+ puts "#{(0..4).p_find(3) { |i| i.odd? }} => 3?"
+ puts
+ puts q = Dispatch::Queue.new("org.macruby.queue.example")
+ q.sync { puts "queue sync" }
+
+ q.async { puts "queue async" }
+
+ puts "queue join"
+ q.join
+ puts
+ puts semaphore = Dispatch::Semaphore.new(0)
+ q.async {
+   puts "semaphore signal"
+   semaphore.signal
+ }
+
+ puts "semaphore wait"
+ semaphore.wait
+
+ puts
+ timer = Dispatch::Source.periodic(0.4) do |src|
+   puts "Dispatch::Source.periodic: #{src.data}"
+ end
+ sleep 1 # => 1 1 ...
+
+ timer.suspend!
+ puts "suspend!"
+ sleep 1
+ timer.resume!
+ puts "resume!"
+ sleep 1 # => 1 2 1 ...
+ timer.cancel!
+ puts "cancel!"
+ puts
+ @sum = 0
+ adder = Dispatch::Source.add do |s|
+   puts "Dispatch::Source.add: #{s.data} (#{@sum += s.data})"
+   semaphore.signal
+ end
+ adder << 1
+ semaphore.wait
+ puts "sum: #{@sum} => 1"
+ adder.suspend!
+ adder << 3
+ adder << 5
+ puts "sum: #{@sum} => 1"
+ adder.resume!
+ semaphore.wait
+ puts "sum: #{@sum} => 9"
+ adder.cancel!
+ @mask = 0
+ masker = Dispatch::Source.or do |s|
+   @mask |= s.data
+   puts "Dispatch::Source.or: #{s.data.to_s(2)} (#{@mask.to_s(2)})"
+   semaphore.signal
+ end
+ masker << 0b0001
+ semaphore.wait
+ puts "mask: #{@mask.to_s(2)} => 1"
+ masker.suspend!
+ masker << 0b0011
+ masker << 0b1010
+ puts "mask: #{@mask.to_s(2)} => 1"
+ masker.resume!
+ semaphore.wait
+ puts "mask: #{@mask.to_s(2)} => 1011"
+ masker.cancel!
+ puts
+
+ @event = 0
+ mask = Dispatch::Source::PROC_EXIT | Dispatch::Source::PROC_SIGNAL
+ proc_src = Dispatch::Source.process($$, mask) do |s|
+   @event |= s.data
+   puts "Dispatch::Source.process: #{s.data.to_s(2)} (#{@event.to_s(2)})"
+   semaphore.signal
+ end
+
+ semaphore2 = Dispatch::Semaphore.new(0)
+ @events = []
+ mask2 = [:exit, :fork, :exec, :signal]
+ proc_src2 = Dispatch::Source.process($$, mask2) do |s|
+   these = Dispatch::Source.data2events(s.data)
+   @events += these
+   puts "Dispatch::Source.process: #{these} (#{@events})"
+   semaphore2.signal
+ end
+ sig_usr1 = Signal.list["USR1"]
+ Signal.trap(sig_usr1, "IGNORE")
+ Process.kill(sig_usr1, $$)
+ Signal.trap(sig_usr1, "DEFAULT")
+ semaphore.wait
+ result = @event & mask
+ print "@event: #{result.to_s(2)} =>"
+ puts " #{Dispatch::Source::PROC_SIGNAL.to_s(2)} (Dispatch::Source::PROC_SIGNAL)"
+ proc_src.cancel!
+ semaphore2.wait
+ puts "@events: #{(result2 = @events & mask2)} => [:signal]"
+ proc_src2.cancel!
+ puts "event2num: #{Dispatch::Source.event2num(result2[0]).to_s(2)} => #{result.to_s(2)}"
+ puts "data2events: #{Dispatch::Source.data2events(result)} => #{result2}"
+ @signals = 0
+ sig_usr2 = Signal.list["USR2"]
+ signal = Dispatch::Source.signal(sig_usr2) do |s|
+   puts "Dispatch::Source.signal: #{s.data} (#{@signals += s.data})"
+   semaphore.signal
+ end
+ puts "signals: #{@signals} => 0"
+ signal.suspend!
+ Signal.trap(sig_usr2, "IGNORE")
+ 3.times { Process.kill(sig_usr2, $$) }
+ Signal.trap(sig_usr2, "DEFAULT")
+ signal.resume!
+ semaphore.wait
+ puts "signals: #{@signals} => 3"
+ signal.cancel!
+ puts
+ @fevent = 0
+ @msg = "#{$$}-#{Time.now.to_s.gsub(' ', '_')}"
+ puts "msg: #{@msg}"
+ filename = "/tmp/dispatch-#{@msg}"
+ puts "filename: #{filename}"
+ file = File.open(filename, "w")
+ fmask = Dispatch::Source::VNODE_DELETE | Dispatch::Source::VNODE_WRITE
+ file_src = Dispatch::Source.file(file.fileno, fmask, q) do |s|
+   @fevent |= s.data
+   puts "Dispatch::Source.file: #{s.data.to_s(2)} (#{@fevent.to_s(2)})"
+   semaphore.signal
+ end
+ file.print @msg
+ file.flush
+ file.close
+ semaphore.wait(0.1)
+ print "fevent: #{(@fevent & fmask).to_s(2)} =>"
+ puts " #{Dispatch::Source::VNODE_WRITE.to_s(2)} (Dispatch::Source::VNODE_WRITE)"
+ File.delete(filename)
+ semaphore.wait(0.1)
+ print "fevent: #{@fevent.to_s(2)} => #{fmask.to_s(2)}"
+ puts " (Dispatch::Source::VNODE_DELETE | Dispatch::Source::VNODE_WRITE)"
+ file_src.cancel!
+ q.join
+
+ @fevent2 = []
+ file = File.open(filename, "w")
+ fmask2 = %w(delete write)
+ file_src2 = Dispatch::Source.file(file, fmask2) do |s|
+   @fevent2 += Dispatch::Source.data2events(s.data)
+   puts "Dispatch::Source.file: #{Dispatch::Source.data2events(s.data)} (#{@fevent2})"
+   semaphore2.signal
+ end
+ file.print @msg
+ file.flush
+ semaphore2.wait(0.1)
+ puts "fevent2: #{@fevent2} => [:write]"
+ file_src2.cancel!
+
+ file = File.open(filename, "r")
+ @input = ""
+ reader = Dispatch::Source.read(file) do |s|
+   @input << file.read(s.data)
+   puts "Dispatch::Source.read: #{s.data}: #{@input}"
+ end
+ while (@input.size < @msg.size) do; end
+ puts "input: #{@input} => #{@msg}" # => e.g., 74323-2010-07-07_15:23:10_-0700
+ reader.cancel!
+ file = File.open(filename, "w")
+ @next_char = 0
+ writer = Dispatch::Source.write(file) do |s|
+   if @next_char < @msg.size
+     char = @msg[@next_char]
+     file.write(char)
+     @next_char += 1
+     puts "Dispatch::Source.write: #{char}|#{@msg[@next_char..-1]}"
+   end
+ end
+ while (@next_char < @msg.size) do; end
+ puts "output: #{File.read(filename)} => #{@msg}" # e.g., 74323-2010-07-07_15:23:10_-0700
+ File.delete(filename)
@@ -0,0 +1,5 @@
1
+ #!/bin/sh
2
+ DISPATCH=../
3
+ /bin/echo -n "#!"
4
+ which macruby
5
+ grep " " $DISPATCH/README.rdoc | sed "s/ //" | grep -v '\$ '
@@ -0,0 +1,34 @@
+ #!/usr/local/bin/macruby
+
+ # An implementation of futures (delayed computations) on top of GCD.
+ # Original implementation written by Patrick Thomson.
+ # Improvements made by Ben Stiglitz.
+
+ include Dispatch
+
+ class Future
+   def initialize(&block)
+     # Each thread gets its own FIFO queue upon which we will dispatch
+     # the delayed computation passed in the &block variable.
+     Thread.current[:futures] ||= Queue.new("org.macruby.futures-#{Thread.current.object_id}")
+     # Groups are just simple layers on top of semaphores.
+     @group = Group.new
+     # Asynchronously dispatch the future to the thread-local queue.
+     Thread.current[:futures].async(@group) { @value = block[] }
+   end
+
+   def value
+     # Wait for the computation to finish. If it has already finished, then
+     # just return the value in question.
+     @group.wait
+     @value
+   end
+ end
+
+ f = Future.new do
+   sleep 2.5
+   'some value'
+ end
+
+ p f.value
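The comment in Future#initialize notes that groups are just simple layers on top of semaphores. To illustrate that remark, here is a hypothetical variant (not part of the gem) that builds an equivalent future directly on a Dispatch::Semaphore, using only calls that appear elsewhere in these examples:

    class SemaphoreFuture
      def initialize(&block)
        @semaphore = Dispatch::Semaphore.new(0)
        # Run the delayed computation on the default concurrent queue and
        # signal completion instead of tracking it with a Group.
        Dispatch::Queue.concurrent.async do
          @value = block.call
          @semaphore.signal
        end
      end

      def value
        @semaphore.wait    # block until the computation signals completion
        @semaphore.signal  # re-signal so later calls to #value return immediately
        @value
      end
    end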
@@ -0,0 +1,95 @@
+ #!/usr/local/bin/macruby
+ # Ruby Fiber Ring Benchmark
+ # Adapted for GCD from: http://people.equars.com/2008/5/22/ruby-fiber-ring-benchmark
+
+ require 'benchmark'
+ require 'dispatch'
+
+ DEBUG = false
+
+ START = DEBUG ? 0 : 1
+ N_NODES = DEBUG ? 1 : 4
+ M_MESSAGES = DEBUG ? 0 : 3
+
+ class Node
+   attr_accessor :successor
+   attr_reader :index
+   def initialize(g, index, successor)
+     @queue = Dispatch::Queue.for(self)
+     @group = g
+     @index = index
+     @successor = successor
+     @current = 0
+   end
+
+   def call(m)
+     @queue.async(@group) do
+       case m
+       when 0
+         return
+       when @current
+         call(m-1)
+       else
+         puts "\t#{self}.call(#{m})" if DEBUG
+         @current = m
+         @successor.call(m)
+       end
+     end
+   end
+
+   def to_s
+     "##{@index}->#{@successor.index}[#{@current}]"
+   end
+ end
+
+ class Ring
+   def initialize(n)
+     @group = Dispatch::Group.new
+     @nodes = []
+     setup(n)
+   end
+
+   def setup(n)
+     last = nil
+     n.downto(1) do |i|
+       @nodes << Node.new(@group, i, last)
+       last = @nodes[-1]
+     end
+     @nodes[0].successor = last
+   end
+
+   def call(m)
+     @nodes[-1].call(m)
+     @group.wait
+   end
+
+   def to_s
+     @nodes.reverse.join " | "
+   end
+ end
+
+ def bench(n, m)
+   tm = Benchmark.measure {
+     yield
+   }.format("%8.6r\n").gsub!(/\(|\)/, "")
+   puts "#{n}, #{m}, #{tm}"
+ end
+
+ START.upto N_NODES do |p|
+   n = 10**p
+   ring = Ring.new n
+   puts "\nRing of size #{n}:"
+   puts "\t#{ring}" if DEBUG
+   START.upto(M_MESSAGES) do |q|
+     r = 10**q
+     [r, 2*r, 5*r].each do |m|
+       puts "#{m} message(s)" if DEBUG
+       bench(n, m) { ring.call m }
+     end
+   end
+ end