tupelo 0.9 → 0.10

Files changed (63)
  1. checksums.yaml +4 -4
  2. data/README.md +31 -1
  3. data/bugs/read-take.rb +19 -0
  4. data/bugs/take-write.rb +7 -7
  5. data/example/app-and-tup.rb +11 -8
  6. data/example/async-transaction.rb +7 -7
  7. data/example/balance-xfer-locking.rb +13 -13
  8. data/example/balance-xfer-retry.rb +16 -16
  9. data/example/balance-xfer.rb +9 -9
  10. data/example/boolean-match.rb +5 -5
  11. data/example/bounded-retry.rb +9 -9
  12. data/example/broker-locking.rb +14 -14
  13. data/example/broker-optimistic.rb +7 -7
  14. data/example/cancel.rb +7 -7
  15. data/example/concurrent-transactions.rb +17 -17
  16. data/example/custom-class.rb +9 -9
  17. data/example/custom-search.rb +8 -8
  18. data/example/fail-and-retry.rb +11 -11
  19. data/example/hash-tuples.rb +12 -10
  20. data/example/increment.rb +8 -8
  21. data/example/load-balancer.rb +2 -1
  22. data/example/lock-mgr-with-queue.rb +18 -18
  23. data/example/lock-mgr.rb +18 -18
  24. data/example/map-reduce-v2.rb +11 -11
  25. data/example/map-reduce.rb +11 -11
  26. data/example/matching.rb +5 -5
  27. data/example/message-bus.rb +6 -3
  28. data/example/notify.rb +17 -17
  29. data/example/optimist.rb +9 -9
  30. data/example/parallel.rb +16 -8
  31. data/example/pregel/distributed.rb +129 -0
  32. data/example/pregel/pagerank.rb +72 -0
  33. data/example/pregel/pregel.rb +102 -0
  34. data/example/pregel/remote.rb +165 -0
  35. data/example/pulse.rb +10 -10
  36. data/example/read-in-trans.rb +15 -15
  37. data/example/subspace.rb +34 -0
  38. data/example/take-nowait.rb +1 -0
  39. data/example/tcp.rb +3 -3
  40. data/example/timeout-trans.rb +5 -5
  41. data/example/timeout.rb +10 -9
  42. data/example/tiny-client.rb +5 -5
  43. data/example/tiny-server.rb +2 -2
  44. data/example/transaction-logic.rb +16 -16
  45. data/example/wait-interrupt.rb +38 -0
  46. data/example/write-wait.rb +11 -11
  47. data/lib/tupelo/archiver/tuplespace.rb +5 -1
  48. data/lib/tupelo/archiver/worker.rb +25 -18
  49. data/lib/tupelo/client/reader.rb +2 -2
  50. data/lib/tupelo/client/transaction.rb +79 -36
  51. data/lib/tupelo/client/tuplespace.rb +1 -0
  52. data/lib/tupelo/client/worker.rb +107 -13
  53. data/lib/tupelo/client.rb +36 -2
  54. data/lib/tupelo/version.rb +1 -1
  55. data/test/lib/mock-client.rb +4 -0
  56. data/test/lib/testable-worker.rb +1 -1
  57. data/test/stress/concurrent-transactions.rb +15 -15
  58. data/test/system/test-archiver.rb +8 -8
  59. data/test/unit/test-ops.rb +56 -0
  60. metadata +72 -68
  61. data/bugs/write-read.rb +0 -15
  62. data/example/broker-queue.rb +0 -35
  63. data/example/child-of-child.rb +0 -34
data/example/pregel/distributed.rb ADDED
@@ -0,0 +1,129 @@
+ # Distributed version of pagerank.rb.
+
+ # TODO
+ #
+ # Improvements noted in the article.
+ # Scale better with subspaces and sqlite or other data structure.
+ # And the subspaces could be defined by consistent hashing and smarter
+ # partitioning.
+ # Also, need to handle crashed process and lost tuple (as lease.rb maybe).
+ # Would be nice to have remote option as well.
+ # Abstract out domain-specific code from generic framework code.
+ # Option to compare result with that of pagerank.rb using same seed.
+
+ require 'tupelo/app'
+
+ NUM_WORKERS = 4
+ NUM_VERTICES = 10
+ PRNG_SEED = 1234
+
+ def update vertex, incoming_messages, vs_dst
+   vertex = vertex.dup
+   incoming_messages ||= []
+   outgoing_messages = []
+   v_me = vertex["id"]
+   rank = vertex["rank"]
+   step = vertex["step"]
+   active = true
+
+   if step < 50
+     rank = 0.15 / NUM_VERTICES + 0.85 * incoming_messages.inject(0.0) {|sum, m|
+       sum + m["rank"]}
+     outgoing_rank = rank / vs_dst.size
+     outgoing_messages = vs_dst.map {|v_dst|
+       {src: v_me, dst: v_dst, step: step + 1, rank: outgoing_rank}}
+   else
+     active = false
+   end
+
+   vertex["rank"] = rank
+   vertex["active"] = active
+   vertex["step"] += 1
+
+   [vertex, outgoing_messages]
+ end
+
+ Tupelo.application do
+
+   NUM_WORKERS.times do |i|
+     child passive: true do
+       log.progname = "worker #{i}"
+
+       loop do
+         step = vertex = nil
+         transaction do
+           step = read(step: Numeric)["step"]
+           vertex = take id: nil, step: step, rank: nil, active: true
+           # wait for a vertex to be available on current step, but retry if
+           # step changes
+         end
+
+         v_me = vertex["id"]
+         vs_dst = read_all(src: v_me, dst: nil).map {|h| h["dst"]}
+         vs_src = read_all(src: nil, dst: v_me).map {|h| h["src"]}
+
+         incoming_messages = transaction do
+           vs_src.inject([]) do |ms, v_src|
+             while m=take_nowait(src: v_src, dst: v_me, step: step, rank: nil)
+               ms << m
+             end
+             ms
+           end
+         end
+
+         vertex, outgoing_messages = update(vertex, incoming_messages, vs_dst)
+         write vertex, *outgoing_messages
+
+         transaction do
+           n = take(count: Integer, active: vertex["active"])["count"]
+           write count: n + 1, active: vertex["active"]
+         end
+       end
+     end
+   end
+
+   child do
+     log.progname = "coordinator"
+
+     step = 0
+     v_ids = (0...NUM_VERTICES).to_a
+
+     v_ids.each do |v_id|
+       write id: v_id, step: step, rank: 1.0/NUM_VERTICES, active: true
+     end
+
+     srand PRNG_SEED
+     v_ids.each do |v_src|
+       v_ids.sample(4).each do |v_dst|
+         write src: v_src, dst: v_dst
+       end
+     end
+
+     write count: 0, active: true
+     write count: 0, active: false
+     loop do
+       log "step: #{step}"
+       transaction do
+         write step: step
+         take count: nil, active: true
+         write count: 0, active: true
+       end
+
+       # wait for all vertices to finish step and maybe become inactive
+       done = transaction do
+         n_active = read(count: nil, active: true)["count"]
+         read(count: NUM_VERTICES - n_active, active: false)
+         n_active == 0
+       end
+
+       if done
+         vs = read_all(id: nil, step: nil, rank: nil, active: nil)
+         log vs.sort_by {|v| v["id"]}.map {|v| v["rank"]}
+         exit
+       end
+
+       take step: step
+       step += 1
+     end
+   end
+ end
data/example/pregel/pagerank.rb ADDED
@@ -0,0 +1,72 @@
+ # port of pagerank.py (not distributed)
+
+ require './pregel'
+ require 'narray'
+
+ NUM_WORKERS = 4
+ NUM_VERTICES = 10
+
+ def main
+   vertices = NUM_VERTICES.times.map {|j|
+     PageRankVertex.new(j, 1.0/NUM_VERTICES, [])}
+   create_edges(vertices)
+
+   pr_test = pagerank_test(vertices)
+   puts "Test computation of pagerank:\n%p" % pr_test
+
+   pr_pregel = pagerank_pregel(vertices)
+   puts "Pregel computation of pagerank:\n%p" % pr_pregel
+
+   diff = pr_pregel - pr_test
+   puts "Difference between the two pagerank vectors:\n%p" % diff
+
+   norm = Math.sqrt(diff * diff)
+   puts "The norm of the difference is: %p" % norm
+ end
+
+ def create_edges vertices
+   vertices.each do |vertex|
+     vertex.out_vertices = vertices.sample(4)
+   end
+ end
+
+ def pagerank_test vertices
+   ident = NMatrix.float(NUM_VERTICES, NUM_VERTICES).unit
+   g = NMatrix.float(NUM_VERTICES, NUM_VERTICES)
+
+   vertices.each do |vertex|
+     num_out_vertices = vertex.out_vertices.size
+     vertex.out_vertices.each do |out_vertex|
+       g[vertex.id, out_vertex.id] = 1.0/num_out_vertices
+       # note reversed dimensions, a funny feature of NArray!
+     end
+   end
+
+   mp = (1.0/NUM_VERTICES)*NVector.float(NUM_VERTICES).fill!(1)
+   return 0.15 * ((ident - 0.85 * g).inverse) * mp
+ end
+
+ def pagerank_pregel vertices
+   pregel = Pregel.new(vertices, NUM_WORKERS)
+   pregel.run()
+   return NVector.to_na(pregel.vertices.map {|vertex| vertex.value})
+ end
+
+ class PageRankVertex < Vertex
+   def update
+     if superstep < 50
+       @value = 0.15 / NUM_VERTICES +
+         0.85 * incoming_messages.inject(0.0) {|sum, (vertex, pagerank)|
+           sum + pagerank}
+       outgoing_pagerank = value / out_vertices.size
+       @outgoing_messages = out_vertices.map {|vertex|
+         [vertex, outgoing_pagerank]}
+     else
+       @active = false
+     end
+   end
+ end
+
+ if __FILE__ == $0
+   main
+ end
data/example/pregel/pregel.rb ADDED
@@ -0,0 +1,102 @@
+ # port of pregel.py (not distributed)
+
+ class Vertex
+   attr_reader :id, :value, :active, :superstep
+   attr_accessor :out_vertices
+   attr_reader :incoming_messages, :outgoing_messages
+
+   def initialize id, value, out_vertices
+     @id = id
+     @value = value
+     @out_vertices = out_vertices
+     @incoming_messages = []
+     @outgoing_messages = []
+     @active = true
+     @superstep = 0
+   end
+
+   def next_superstep
+     @superstep += 1
+     @incoming_messages = []
+   end
+ end
+
+ class Pregel
+   attr_reader :vertices, :num_workers, :partition
+
+   def initialize vertices, num_workers
+     @vertices = vertices
+     @num_workers = num_workers
+     @partition = nil
+   end
+
+   def run
+     @partition = partition_vertices
+     while check_active
+       superstep
+       redistribute_messages
+     end
+   end
+
+   # returns {0 => [vertex,...],.... (num_workers-1) => [...]}
+   def partition_vertices
+     vertices.group_by {|vertex| worker(vertex)}
+   end
+
+   def worker vertex
+     vertex.hash % num_workers
+   end
+
+   def superstep
+     workers = []
+     partition.each_value do |vertex_list|
+       workers << Worker.new(vertex_list)
+     end
+
+     workers.each do |worker|
+       worker.join
+     end
+   end
+
+   def redistribute_messages
+     vertices.each do |vertex|
+       vertex.next_superstep
+     end
+
+     vertices.each do |vertex|
+       vertex.outgoing_messages.each do |receiving_vertex, message|
+         receiving_vertex.incoming_messages << [vertex, message]
+       end
+     end
+   end
+
+   def check_active
+     vertices.any? {|vertex| vertex.active}
+   end
+ end
+
+ class Worker
+   attr_reader :vertices
+
+   def initialize vertices
+     @vertices = vertices
+     @thread = Thread.new {run}
+     @thread.abort_on_exception = true
+   end
+
+   def join
+     @thread.join
+   end
+
+   def run
+     superstep
+   end
+
+   def superstep
+     vertices.each do |vertex|
+       if vertex.active
+         vertex.update
+       end
+     end
+   end
+ end
data/example/pregel/remote.rb ADDED
@@ -0,0 +1,165 @@
+ # Distributed version of pagerank.rb.
+
+ # TODO
+ #
+ # Improvements noted in the article.
+ # Scale better with subspaces and sqlite or other data structure.
+ # And the subspaces could be defined by consistent hashing and smarter
+ # partitioning.
+ # Also, need to handle crashed process and lost tuple (as lease.rb maybe).
+ # Would be nice to have remote option as well.
+ # Abstract out domain-specific code from generic framework code.
+ # Option to compare result with that of pagerank.rb using same seed.
+
+ require 'tupelo/app'
+
+ NUM_WORKERS = 4
+ NUM_VERTICES = 10
+ PRNG_SEED = 1234
+
+ def update vertex, incoming_messages, vs_dst
+   vertex = vertex.dup
+   incoming_messages ||= []
+   outgoing_messages = []
+   v_me = vertex["id"]
+   rank = vertex["rank"]
+   step = vertex["step"]
+   active = true
+
+   if step < 50
+     rank = 0.15 / NUM_VERTICES + 0.85 * incoming_messages.inject(0.0) {|sum, m|
+       sum + m["rank"]}
+     outgoing_rank = rank / vs_dst.size
+     outgoing_messages = vs_dst.map {|v_dst|
+       {src: v_me, dst: v_dst, step: step + 1, rank: outgoing_rank}}
+   else
+     active = false
+   end
+
+   vertex["rank"] = rank
+   vertex["active"] = active
+   vertex["step"] += 1
+
+   [vertex, outgoing_messages]
+ end
+
+ require 'tupelo/app/remote'
+ def host i
+   case i % 2
+   when 0; "od1"
+   when 1; "od2"
+   end
+ end
+
+ Tupelo.tcp_application do
+
+   NUM_WORKERS.times do |i|
+     # child passive: true do
+     remote host: host(i), log: true, passive: true, eval: %{
+       log "hello"
+       def update vertex, incoming_messages, vs_dst
+         vertex = vertex.dup
+         incoming_messages ||= []
+         outgoing_messages = []
+         v_me = vertex["id"]
+         rank = vertex["rank"]
+         step = vertex["step"]
+         active = true
+
+         if step < 50
+           rank = 0.15 / #{NUM_VERTICES} + 0.85 * incoming_messages.inject(0.0) {|sum, m|
+             sum + m["rank"]}
+           outgoing_rank = rank / vs_dst.size
+           outgoing_messages = vs_dst.map {|v_dst|
+             {src: v_me, dst: v_dst, step: step + 1, rank: outgoing_rank}}
+         else
+           active = false
+         end
+
+         vertex["rank"] = rank
+         vertex["active"] = active
+         vertex["step"] += 1
+
+         [vertex, outgoing_messages]
+       end
+
+       log.progname = "worker #{i}"
+
+       loop do
+         step = vertex = nil
+         transaction do
+           step = read(step: Numeric)["step"]
+           vertex = take id: nil, step: step, rank: nil, active: true
+           # wait for a vertex to be available on current step, but retry if
+           # step changes
+         end
+
+         v_me = vertex["id"]
+         vs_dst = read_all(src: v_me, dst: nil).map {|h| h["dst"]}
+         vs_src = read_all(src: nil, dst: v_me).map {|h| h["src"]}
+
+         incoming_messages = transaction do
+           vs_src.inject([]) do |ms, v_src|
+             while m=take_nowait(src: v_src, dst: v_me, step: step, rank: nil)
+               ms << m
+             end
+             ms
+           end
+         end
+
+         vertex, outgoing_messages = update(vertex, incoming_messages, vs_dst)
+         write vertex, *outgoing_messages
+
+         transaction do
+           n = take(count: Integer, active: vertex["active"])["count"]
+           write count: n + 1, active: vertex["active"]
+         end
+       end
+     }
+   end
+
+   child do
+     log.progname = "coordinator"
+
+     step = 0
+     v_ids = (0...NUM_VERTICES).to_a
+
+     v_ids.each do |v_id|
+       write id: v_id, step: step, rank: 1.0/NUM_VERTICES, active: true
+     end
+
+     srand PRNG_SEED
+     v_ids.each do |v_src|
+       v_ids.sample(4).each do |v_dst|
+         write src: v_src, dst: v_dst
+       end
+     end
+
+     write count: 0, active: true
+     write count: 0, active: false
+     loop do
+       log "step: #{step}"
+       transaction do
+         write step: step
+         take count: nil, active: true
+         write count: 0, active: true
+       end
+
+       # wait for all vertices to finish step and maybe become inactive
+       done = transaction do
+         n_active = read(count: nil, active: true)["count"]
+         read(count: NUM_VERTICES - n_active, active: false)
+         n_active == 0
+       end
+
+       if done
+         vs = read_all(id: nil, step: nil, rank: nil, active: nil)
+         log vs.sort_by {|v| v["id"]}.map {|v| v["rank"]}
+         exit
+       end
+
+       take step: step
+       step += 1
+     end
+   end
+ end
data/example/pulse.rb CHANGED
@@ -2,25 +2,25 @@
  
  require 'tupelo/app'
  
- Tupelo.application do |app|
-   app.child do |client|
-     client.take ['start']
+ Tupelo.application do
+   child do
+     take ['start']
      10.times do |i|
-       client.pulse [i]
+       pulse [i]
        sleep 0.1
      end
-     client.write ['finish']
+     write ['finish']
    end
  
-   app.child do |client|
+   child do
      Thread.new do
        loop do
-         client.log client.read [Integer]
-         client.log client.read_all [Integer]
+         log read [Integer]
+         log read_all [Integer]
        end
      end
  
-     client.write ['start']
-     client.take ['finish']
+     write ['start']
+     take ['finish']
    end
  end
data/example/read-in-trans.rb CHANGED
@@ -4,53 +4,53 @@ Thread.abort_on_exception = true
  
  N = 5
  
- Tupelo.application do |app|
+ Tupelo.application do
    N.times do |i|
-     app.child do |client|
+     child do
        Thread.new do
          step = 0
          loop do
            step += 1
-           client.transaction do |t|
-             t.read ["enabled", i]
-             t.write ["output", i, step]
+           transaction do
+             read ["enabled", i]
+             write ["output", i, step]
            end
            sleep 0.2
          end
        end
  
-       client.read ["done"]
+       read ["done"]
        exit
      end
    end
  
-   app.child do |client|
+   child do
      t = Thread.new do
        loop do
-         msg, i, step = client.take [nil, nil, nil]
+         msg, i, step = take [nil, nil, nil]
          printf "%20s from %2d at step %3d\n", msg, i, step
        end
      end
  
      puts "Turning on 0 and 4"
-     client.write ["enabled", 0]
-     client.write ["enabled", 4]
+     write ["enabled", 0]
+     write ["enabled", 4]
      sleep 2
  
      puts "Turning off 0"
-     client.take ["enabled", 0]
+     take ["enabled", 0]
      sleep 2
  
      puts "Turning off 4"
-     client.take ["enabled", 4]
+     take ["enabled", 4]
      sleep 2
  
      puts "Turning on 1 and 3"
-     client.write ["enabled", 1]
-     client.write ["enabled", 3]
+     write ["enabled", 1]
+     write ["enabled", 3]
      sleep 2
  
      puts "Bye!"
-     client.write ["done"]
+     write ["done"]
    end
  end
data/example/subspace.rb ADDED
@@ -0,0 +1,34 @@
+ require 'tupelo/app'
+
+ Tupelo.application do
+   local do
+     log [subscribed_all, subscribed_tags]
+
+     use_subspaces!
+
+     define_subspace(
+       tag: "foo",
+       template: [
+         {type: "number"}
+       ]
+     )
+
+     write_wait [0]
+
+     log read_all(Object)
+   end
+
+   cid = child subscribe: ["foo"] do
+     log [subscribed_all, subscribed_tags]
+     write [1]
+     write_wait ["abc"]
+     log read_all(Object)
+   end
+   Process.wait cid
+
+   local do
+     log [subscribed_all, subscribed_tags]
+     log read_all(Object)
+     log read_all(subspace "foo")
+   end
+ end
data/example/take-nowait.rb CHANGED
@@ -1,6 +1,7 @@
  # Run this with --trace to see that, even in the FAIL case, take_nowait never
  # hangs waiting for a match. The "ready" tuple is just to keep the take
  # requests fairly close in time, increasing the chance of transaction failure.
+ # Exactly one of the contending clients will take the tuple.
  
  require 'tupelo/app'
  
data/example/tcp.rb CHANGED
@@ -28,11 +28,11 @@ port = 9901 # Use 0 to let system choose free port
  Tupelo.application servers_file: svr,
      seqd_addr: [:tcp, '0.0.0.0', port],
      cseqd_addr: [:tcp, '0.0.0.0', port + 1],
-     arcd_addr: [:tcp, '0.0.0.0', port + 2] do |app|
-   if app.owns_servers
+     arcd_addr: [:tcp, '0.0.0.0', port + 2] do
+   if owns_servers
      puts "server started; ^C to stop"
      puts "run in another terminal: ../bin/tup tcp.yaml"
-     if app.log.level > Logger::INFO
+     if log.level > Logger::INFO
        puts "(run with --info or --trace to see events)"
      end
      sleep
data/example/timeout-trans.rb CHANGED
@@ -4,17 +4,17 @@
  
  require 'tupelo/app'
  
- Tupelo.application do |app|
-   app.child do |client|
+ Tupelo.application do
+   child do
      result =
        begin
-         client.transaction timeout: 1 do |t|
-           t.take ["foo"]
+         transaction timeout: 1 do
+           take ["foo"]
          end
        rescue TimeoutError => ex
          ex
        end
-     client.log "result = #{result.inspect}"
+     log "This should be a timeout error: #{result.inspect}"
    end
  end
  
data/example/timeout.rb CHANGED
@@ -3,25 +3,26 @@
  
  require 'tupelo/app'
  
- Tupelo.application do |app|
-   app.child do |client|
+ Tupelo.application do
+   child do
      begin
        n_sec = 2
        Timeout.timeout n_sec do
-         client.log "waiting for non-existing tuple #{[0]}"
-         client.read [0]
+         log "waiting for non-existing tuple #{[0]}"
+         read [0]
        end
      rescue TimeoutError
+       log "stopped waiting"
      end
  
-     r = client.read [1]
-     client.log "got #{r}"
+     r = read [1]
+     log "got #{r}"
    end
  
-   app.child do |client|
+   child do
      sleep 1
-     client.log "writing [1]"
-     client.write [1]
+     log "writing [1]"
+     write [1]
    end
  end