tupelo 0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/COPYING +22 -0
- data/README.md +422 -0
- data/Rakefile +77 -0
- data/bench/pipeline.rb +25 -0
- data/bugs/take-write.rb +19 -0
- data/bugs/write-read.rb +15 -0
- data/example/add.rb +19 -0
- data/example/app-and-tup.rb +30 -0
- data/example/async-transaction.rb +16 -0
- data/example/balance-xfer-locking.rb +50 -0
- data/example/balance-xfer-retry.rb +55 -0
- data/example/balance-xfer.rb +33 -0
- data/example/boolean-match.rb +32 -0
- data/example/bounded-retry.rb +35 -0
- data/example/broker-locking.rb +43 -0
- data/example/broker-optimistic-async.rb +33 -0
- data/example/broker-optimistic.rb +41 -0
- data/example/broker-queue.rb +2 -0
- data/example/cancel.rb +17 -0
- data/example/concurrent-transactions.rb +39 -0
- data/example/custom-class.rb +29 -0
- data/example/custom-search.rb +27 -0
- data/example/fail-and-retry.rb +29 -0
- data/example/hash-tuples.rb +53 -0
- data/example/increment.rb +21 -0
- data/example/lock-mgr-with-queue.rb +75 -0
- data/example/lock-mgr.rb +62 -0
- data/example/map-reduce-v2.rb +96 -0
- data/example/map-reduce.rb +77 -0
- data/example/matching.rb +9 -0
- data/example/notify.rb +35 -0
- data/example/optimist.rb +20 -0
- data/example/pulse.rb +24 -0
- data/example/read-in-trans.rb +56 -0
- data/example/small-simplified.rb +18 -0
- data/example/small.rb +76 -0
- data/example/tcp.rb +35 -0
- data/example/timeout-trans.rb +21 -0
- data/example/timeout.rb +27 -0
- data/example/tiny-client.rb +14 -0
- data/example/tiny-server.rb +12 -0
- data/example/transaction-logic.rb +40 -0
- data/example/write-wait.rb +17 -0
- data/lib/tupelo/app.rb +121 -0
- data/lib/tupelo/archiver/tuplespace.rb +68 -0
- data/lib/tupelo/archiver/worker.rb +87 -0
- data/lib/tupelo/archiver.rb +86 -0
- data/lib/tupelo/client/common.rb +10 -0
- data/lib/tupelo/client/reader.rb +124 -0
- data/lib/tupelo/client/transaction.rb +455 -0
- data/lib/tupelo/client/tuplespace.rb +50 -0
- data/lib/tupelo/client/worker.rb +493 -0
- data/lib/tupelo/client.rb +44 -0
- data/lib/tupelo/version.rb +3 -0
- data/test/lib/mock-client.rb +38 -0
- data/test/lib/mock-msg.rb +47 -0
- data/test/lib/mock-queue.rb +42 -0
- data/test/lib/mock-seq.rb +50 -0
- data/test/lib/testable-worker.rb +24 -0
- data/test/stress/concurrent-transactions.rb +42 -0
- data/test/system/test-archiver.rb +35 -0
- data/test/unit/test-mock-queue.rb +93 -0
- data/test/unit/test-mock-seq.rb +39 -0
- data/test/unit/test-ops.rb +222 -0
- metadata +134 -0
@@ -0,0 +1,493 @@
|
|
1
|
+
require 'thread'
|
2
|
+
require 'tupelo/client/reader'
|
3
|
+
require 'tupelo/client/transaction'
|
4
|
+
require 'object-template'
|
5
|
+
require 'atdo'
|
6
|
+
|
7
|
+
class Tupelo::Client
|
8
|
+
class Worker
|
9
|
+
attr_reader :client
|
10
|
+
attr_reader :seq
|
11
|
+
attr_reader :arc
|
12
|
+
attr_reader :log
|
13
|
+
attr_reader :client_id
|
14
|
+
attr_reader :local_tick
|
15
|
+
attr_reader :global_tick
|
16
|
+
attr_reader :start_tick
|
17
|
+
attr_reader :delta
|
18
|
+
attr_reader :msg_reader_thread
|
19
|
+
attr_reader :worker_thread
|
20
|
+
attr_reader :cmd_queue
|
21
|
+
attr_reader :tuplespace
|
22
|
+
attr_reader :message_class
|
23
|
+
attr_reader :blobber
|
24
|
+
attr_reader :read_waiters
|
25
|
+
attr_reader :prep_waiters
|
26
|
+
attr_reader :trans_waiters
|
27
|
+
attr_reader :notify_waiters
|
28
|
+
|
29
|
+
GET_TUPLESPACE = "get tuplespace"
|
30
|
+
|
31
|
+
# Value object describing one batch of tuplespace operations as it travels
# through the sequencer: the four op lists plus the atomic/batch flag.
class Operation
  attr_reader :atomic, :writes, :pulses, :takes, :reads
  ## "put" or "set" operation to ensure that at least one
  ## copy of a tuple exists?

  def initialize atomic, writes, pulses, takes, reads
    @atomic = atomic
    @writes = writes
    @pulses = pulses
    @takes  = takes
    @reads  = reads
  end

  # Human-readable summary, e.g. %{atomic write [1]; take [2]}.
  def to_s
    labeled = [["write", writes], ["pulse", pulses],
               ["take", takes], ["read", reads]]
    parts = labeled.map do |label, tuples|
      "#{label} #{tuples.map(&:inspect).join(", ")}" unless tuples.empty?
    end.compact
    [atomic ? "atomic" : "batch", parts.join("; ")].join(" ")
  end
  alias inspect to_s
end
|
53
|
+
|
54
|
+
# Build a worker bound to +client+. No threads are started and no
# connection-time state is read here; that happens in #start /
# #observe_started_client.
def initialize client
  @client = client
  @seq = nil   # sequencer message stream; set in observe_started_client
  @arc = nil   # archiver connection; may remain nil (pubsub mode)
  @log = client.log

  @client_id = nil
  @global_tick = nil  # tick of the last operation applied locally
  @start_tick = nil
  @local_tick = 0     # counts transactions sent by this client
  @delta = 0          # transactions sent but not yet sequenced (pipelining)

  @cmd_queue = client.make_queue
  # client.tuplespace may be a class (instantiate a fresh space per
  # worker) or an already-built instance (use it as-is); the duck-typed
  # rescue distinguishes the two.
  @tuplespace =
    begin
      client.tuplespace.new
    rescue NoMethodError
      client.tuplespace
    end
  @message_class = client.message_class
  @blobber = nil

  # Parked requests, serviced as matching messages arrive:
  @read_waiters = []    # blocking reads waiting for a matching write/pulse
  @prep_waiters = []    # transactions in their prepare phase
  @trans_waiters = []   # transactions submitted to seq, awaiting their tick
  @notify_waiters = []  # observers notified of every applied operation
  @stopping = false
end
|
82
|
+
|
83
|
+
# Start the worker: snapshot connection state from the client, then spin
# up the sequencer-reader thread and the request-processing thread.
# Idempotent: calling again while running is a no-op.
def start
  return if @worker_thread

  log.info "worker starting"
  observe_started_client

  @msg_reader_thread = Thread.new { run_msg_reader_thread }
  @worker_thread = Thread.new { run_worker_thread }
end

# True when called on the worker's own request-processing thread.
def in_thread?
  worker_thread == Thread.current
end

# Copy connection-time state (id, codec, streams, starting tick) from the
# now-started client.
def observe_started_client
  @client_id  = client.client_id
  @blobber    = client.blobber
  @seq        = client.seq
  @arc        = client.arc
  @start_tick = client.start_tick
end

# Graceful shutdown: ask the worker loop to drain, then kill the reader.
def stop
  self << :stop
  worker_thread.join if worker_thread ## join(limit)?
  msg_reader_thread.kill if msg_reader_thread
  @atdo.stop if @atdo
  ## optionally write final state (including global_tick) to disk
end

# stop without any remote handshaking
def stop!
  [msg_reader_thread, worker_thread].each do |thread|
    thread.kill if thread
  end
  @atdo.stop if @atdo
end

# Schedule +action+ to run on the worker thread at +time+.
def at time, &action
  @atdo ||= AtDo.new
  @atdo.at time do
    self << action
  end
end

# Enqueue a command/message/request for the worker thread.
def << cmd
  cmd_queue << cmd
end

# Reader-thread entry point: pump messages from seq until it closes.
def run_msg_reader_thread
  read_messages_from_seq
  log.warn "connection to seq closed"
  handle_seq_closed
rescue => ex
  log.error ex
  raise
end

def handle_seq_closed
  ## what to do here in general?
  ## for each waiter, push :stop into queue ?
end

# Forward every sequenced message into the worker's command queue.
def read_messages_from_seq
  seq.each do |message|
    ## skip msg if tags not relevant
    ## (and client must apply tags--i.e. not using multicast)
    self << message
  end
end
|
157
|
+
|
158
|
+
# Worker-thread entry point: sync local state up to start_tick, then serve
# queued requests until told to stop.
def run_worker_thread
  update_to_tick @start_tick
  run_request_loop
rescue => ex
  log.error ex
  raise
end

# Pump requests forever; a handler exits the loop by throwing :done.
def run_request_loop
  catch :done do
    loop { handle_one_request }
  end
end
|
173
|
+
|
174
|
+
# Process one item from cmd_queue. Sequencer messages are always applied
# (to keep the replica consistent); other items are client requests.
# After a :stop command, new client requests are ignored, but messages
# keep being applied until every in-flight transaction has resolved.
#
# Fix: the stop-drain check (throw :done vs. log-and-wait) was duplicated
# verbatim in the :stop and message branches; it now lives in one helper.
def handle_one_request
  case cmd = cmd_queue.pop
  when :stop
    @stopping = true
    finish_if_idle
  when message_class
    handle_message cmd
    finish_if_idle if @stopping
  else
    handle_client_request cmd unless @stopping
  end
end

# While stopping: leave the request loop once no transactions are pending;
# otherwise log what we are still waiting for.
def finish_if_idle
  if trans_waiters.empty?
    throw :done
  else
    log.info {"stopping; waiting for #{trans_waiters}"}
  end
end
|
196
|
+
|
197
|
+
# Bring the local tuplespace replica up to +tick+ before processing any
# queued messages. Depending on configuration this means: nothing
# (tick 0), trusting the tick (no archiver / pubsub mode), or downloading
# the full tuplespace from the archiver. Always closes the archiver
# connection on the way out.
def update_to_tick tick
  # at this point we know that the seq messages now accumulating in
  # cmd_queue are tick+1, tick+2, .... In particular, if tick==0, we don't
  # need to get state from archiver.
  log.debug {"update_to_tick #{tick}"}

  if tick == 0
    @global_tick = 0
    return
  end

  # Dead code: placeholder for restoring a persisted snapshot from disk
  # before (or instead of) asking the archiver.
  if false
    ## load from file, update @global_tick, and see if equal to tick
    if @global_tick == tick
      return
    elsif @global_tick > tick
      raise "bad tick: #{@global_tick} > #{tick}"
    end
  end

  # No archiver: accept +tick+ on faith. Local state is empty, so any op
  # that needs existing tuples (take, local read) cannot be satisfied.
  unless arc
    log.warn "no archiver provided; assuming pubsub mode; " +
      "some client ops (take and local read) will not work"
    @global_tick = tick
    log.info "global_tick = #{global_tick}"
    return
  end

  log.info "requesting tuplespace from arc"
  arc << [GET_TUPLESPACE, nil]
    ## replace nil with template tuples, if any

  begin
    # Protocol (as read from this code): first a tick, then a stream of
    # tuples terminated by a nil sentinel.
    arc_tick = arc.read[0]
    log.info "arc says global_tick = #{arc_tick}"

    done = false
    arc.each do |tuple|
      if tuple.nil?
        done = true
      else
        raise "bad object stream from archiver" if done
        tuplespace.insert tuple
      end
    end
    unless done
      raise "did not get all of tuplespace from archiver" ## roll back?
    end

    log.info "received tuplespace from arc"

    @global_tick = arc_tick
    log.info "global_tick = #{global_tick}"
  end

ensure
  arc.close if arc and not arc.closed?
end
|
255
|
+
|
256
|
+
# Apply one sequenced operation to the local tuplespace replica. Every
# client applies the same message stream in global_tick order, so all
# replicas evolve identically. Along the way: notify observers, resolve
# parked read/prepare waiters, and — if the message originated from this
# client — complete or fail the corresponding pending transaction.
def handle_message msg
  log.debug {"seq sent #{msg.inspect}"}

  # Tick discipline: only global_tick+1 is applied. Older messages are
  # redundant replays; newer ones mean we missed something.
  if msg.global_tick != global_tick + 1
    if msg.global_tick < global_tick + 1
      log.debug {"discarding redundant message at #{msg.global_tick}"}
        # due to archiver timing, for example
      return
    elsif msg.global_tick > global_tick + 1
      log.error "message out of order: #{msg.inspect}, " +
        "received at global_tick=#{global_tick}"
      ## exit? wait? in udp case, get history
    end
  end

  @global_tick += 1
  @delta = 0

  record_history msg
  # The blob decodes to the Operation constructor args:
  # [atomic, writes, pulses, takes, reads].
  op = Operation.new(*blobber.load(msg.blob)) ## op.freeze_deeply
  log.debug {"applying #{op} from client #{msg.client_id}"}

  # Observers see the attempt before the outcome.
  notify_waiters.each do |waiter|
    waiter << [:attempt, msg.global_tick, msg.client_id, op]
  end

  # Match takes (each template must bind a distinct tuple) and reads.
  granted_tuples = tuplespace.find_distinct_matches_for(op.takes)
  read_tuples = op.reads.map {|t| tuplespace.find_match_for(t)}

  # Atomic ops need every take and read satisfied; batch ops always apply.
  succeeded = !op.atomic || (granted_tuples.all? && read_tuples.all?)
  actual_tuples = granted_tuples.compact

  if succeeded
    log.debug {"inserting #{op.writes}; deleting #{actual_tuples}"}
    tuplespace.transaction inserts: op.writes, deletes: actual_tuples
  end

  notify_waiters.each do |waiter|
    waiter << [
      succeeded ? :success : :failure,
      msg.global_tick, msg.client_id, op]
  end

  # If this operation is our own, it must be the oldest pending
  # transaction (seq preserves per-client order).
  trans = nil
  if msg.client_id == client_id
    trans = trans_waiters.first
    unless trans and trans.local_tick == msg.local_tick
      log.error "transaction #{op} out of order in sequence " +
        trans_waiters.inspect
      ## exit? wait?
    end
    trans_waiters.shift
    log.debug {"operation belongs to this client: #{trans.inspect}"}
  end

  if not actual_tuples.empty?
    if succeeded
      # Taken tuples are gone; preparing transactions that depended on
      # them must drop out (keep_if keeps only still-valid waiters).
      actual_tuples.each do |tuple|
        prep_waiters.keep_if do |waiter|
          waiter.unprepare tuple
          ## optimization: track number of instances of tuple, to avoid
          ## false positive in #unprepare
        end
      end

      log.debug {trans ? "taking #{granted_tuples}" :
        "client #{msg.client_id} takes #{granted_tuples}"}

    else
      log.debug {
        missing = op.takes - actual_tuples
        trans ? "failed to take #{missing}" :
          "client #{msg.client_id} failed to take #{missing}"}
    end
  end

  if succeeded
    # New tuples may satisfy parked blocking reads...
    op.writes.each do |tuple|
      read_waiters.delete_if do |waiter|
        waiter.gloms tuple
      end
    end

    # ...pulses are delivered to readers but never stored.
    op.pulses.each do |tuple|
      log.debug {"pulsing #{tuple}"}
      read_waiters.delete_if do |waiter|
        waiter.gloms tuple
      end
    end

    # New tuples are also offered to transactions still preparing.
    op.writes.each do |tuple|
      prep_waiters.keep_if do |waiter|
        waiter.prepare tuple
      end
    end
  end

  if trans
    trans_waiters.delete trans

    if succeeded
      trans.done msg.global_tick, granted_tuples # note: tuples not frozen
    else
      # NOTE: parsed as trans.fail(((op.takes - actual_tuples) +
      # (op.reads - read_tuples))) — one combined list of missing tuples.
      trans.fail (op.takes - actual_tuples) + (op.reads - read_tuples)
    end
  end
end

# Hook called once per applied message; no-op here. Presumably overridden
# by the archiver's worker to persist history — confirm against
# tupelo/archiver/worker.rb.
def record_history msg; end
|
365
|
+
|
366
|
+
# Dispatch one request object submitted by the client to the worker
# thread. The request's class selects the handler; anything unrecognized
# is an error. Errors are logged rather than propagated.
def handle_client_request req
  log.debug {"client requested #{req.inspect}"}

  case req
  when message_class
    raise "only seq can send messages"
  when Waiter       then handle_waiter req
  when Matcher      then handle_matcher req
  when Unwaiter     then handle_unwaiter req
  when Transaction  then handle_transaction req
  when NotifyWaiter
    # Toggle: unregister if present, otherwise register.
    notify_waiters.delete req or notify_waiters.push req
  when Proc         then req.call
  else
    raise "unknown request from client: #{req}"
  end
rescue => ex
  log.error "error while handling #{req.inspect}: #{ex}"
  ## Raise an error in the waiter? Need to generalize the mechanism in
  ## Transaction.
end
|
399
|
+
|
400
|
+
# Advance a transaction through its lifecycle on the worker thread:
# open -> prepare (and park), closed -> submit (and unpark). A failed
# transaction needs no action; anything else is unexpected.
def handle_transaction t
  if t.open?
    t.prepare
    prep_waiters << t unless prep_waiters.include? t
  elsif t.closed?
    t.submit
    prep_waiters.delete t
  elsif t.failed?
    # no-op
  else
    log.warn "not open or closed or failed: #{t.inspect}"
  end
rescue => ex
  log.error "error while handling #{t.inspect}: #{ex}"
  t.error ex
end

# Cancel a parked waiter, whichever list it is waiting on.
def handle_unwaiter unwaiter
  w = unwaiter.waiter
  read_waiters.delete(w) or prep_waiters.delete(w)
end

# Try to satisfy the waiter from the current tuplespace; if nothing
# matches, park it until a future write/pulse arrives.
def handle_waiter waiter
  tuplespace.find {|tuple| waiter.gloms tuple} or
    read_waiters << waiter
  ## optimize: if template is just a tuple, use hashing,
  ## but will need to expose waiter.tuple
end

# Non-blocking read: feed current tuples to the matcher. In :all mode,
# stream every tuple and then signal completion; otherwise stop at the
# first match or signal failure.
def handle_matcher matcher
  if matcher.all
    tuplespace.each {|tuple| matcher.gloms tuple}
    matcher.fails
  else
    tuplespace.find {|tuple| matcher.gloms tuple} or
      matcher.fails
  end
end
|
438
|
+
|
439
|
+
# Serialize +transaction+'s operations into a message and submit it to
# the sequencer, registering the transaction to be completed when its
# message comes back in handle_message. Returns the local tick assigned
# to the message. Raises (re-raising with context) if serialization or
# sending fails.
def send_transaction transaction
  msg = message_class.new
  msg.client_id = client_id
  msg.local_tick = local_tick + 1
  msg.global_tick = global_tick
  msg.delta = delta + 1 # pipelined write/take
  ##msg.tags = nil

  begin
    # Blob layout mirrors Operation.new's positional args.
    msg.blob = blobber.dump([
      transaction.atomic,
      transaction.writes,
      transaction.pulses,
      transaction.take_tuples,
      transaction.read_tuples
    ])
    ## optimization: use bitfields to identify which ops are present
    ## (instead of nils), and combine this with atomic flag in one int
  rescue => ex
    raise ex, "cannot serialize #{transaction.inspect}: #{ex}"
  end

  begin
    seq << msg
  rescue => ex
    raise ex, "cannot send request for #{transaction.inspect}: #{ex}"
  end

  # Only count the send after it has definitely gone out.
  @local_tick += 1
  @delta += 1

  trans_waiters << transaction

  return msg.local_tick
end
|
474
|
+
|
475
|
+
# Used by the client api to protect the worker's copy of a template from
# later mutation by the client, and to normalize it for serialization
# types that don't represent symbols. Keyword args thus work naturally:
# read(k1: val, k2: val). Non-array, non-hash objects pass through as-is.
def make_template obj
  return obj unless obj.respond_to?(:to_ary) || obj.respond_to?(:to_hash)
  deep_copy = Marshal.load(Marshal.dump(obj))
  rot_for(deep_copy).optimize!
end

# Wrap +spec+ in a RubyObjectTemplate whose keys are round-tripped
# through the client's blobber (so key representation matches the wire
# format).
def rot_for spec
  RubyObjectTemplate.new(spec, proc {|key| blobber.load(blobber.dump(key))})
end

# Same key round-trip, but producing a PortableObjectTemplate.
def pot_for spec
  PortableObjectTemplate.new(spec, proc {|key| blobber.load(blobber.dump(key))})
end
|
492
|
+
end
|
493
|
+
end
|
@@ -0,0 +1,44 @@
|
|
1
|
+
require 'funl/client'
|
2
|
+
|
3
|
+
module Tupelo
  # Client-facing connection object. Funl::Client supplies the transport;
  # the tuplespace work itself is delegated to a background Worker.
  class Client < Funl::Client
    require 'tupelo/client/worker'
    require 'tupelo/client/tuplespace'

    include Api

    attr_reader :worker, :tuplespace

    # +tuplespace+ may be a tuplespace class or instance (see Worker).
    def initialize(tuplespace: SimpleTuplespace, **opts)
      super(**opts)
      @tuplespace = tuplespace
      @worker = make_worker
    end

    def make_worker
      Worker.new(self)
    end

    def make_queue
      Queue.new ## use lock-free queue based on Atomic
    end

    def start
      super
      worker.start
    end

    def stop
      worker.stop
    end

    # With no args, returns the logger itself; with args, records them at
    # UNKNOWN severity.
    def log *args
      return super() if args.empty?
      super().unknown(*args)
    end
  end
end
|
@@ -0,0 +1,38 @@
|
|
1
|
+
require 'tupelo/client/reader'
|
2
|
+
require 'tupelo/client/transaction'
|
3
|
+
|
4
|
+
require 'mock-queue.rb'
|
5
|
+
|
6
|
+
# Test double for Tupelo::Client: all collaborators are injectable via
# plain accessors, and the worker is driven deterministically one step at
# a time through a Fiber instead of real threads.
class MockClient
  include Tupelo::Client::Api

  attr_accessor :worker, :log, :client_id, :blobber, :message_class,
                :tuplespace, :seq, :arc, :start_tick

  # Lazily-built fiber that performs one worker.update per resume.
  def updater
    @updater ||= Fiber.new do
      loop do
        log.debug "update begin"
        worker.update
        log.debug "update end"
        Fiber.yield
      end
    end
  end

  # Run exactly one update step.
  def update
    updater.resume
  end

  def make_queue
    MockQueue.new
  end
end
|
@@ -0,0 +1,47 @@
|
|
1
|
+
# Test double for the sequencer's message objects: same fields, plus
# value equality and hashing so messages can be compared and used as
# hash keys in tests.
class MockMessage
  attr_accessor :client_id
  attr_accessor :local_tick
  attr_accessor :global_tick
  attr_accessor :delta
  attr_accessor :tags
  attr_accessor :blob

  def initialize(*args)
    @client_id, @local_tick, @global_tick, @delta, @tags, @blob = *args
  end

  # Keyword-style constructor: MockMessage[client: 1, local: 2, ...]
  def self.[](
    client: nil, local: nil, global: nil, delta: nil, tags: nil, blob: nil)
    new client, local, global, delta, tags, blob
  end

  def inspect
    d = delta ? "+#{delta}" : nil
    t = tags ? " #{tags}" : nil
    s = [
      "client #{client_id}",
      "local #{local_tick}",
      "global #{global_tick}#{d}"
    ].join(", ")
    "<Message: #{s}#{t}>"
  end

  def to_a
    [@client_id, @local_tick, @global_tick, @delta, @tags, @blob]
  end

  # Field-wise value equality.
  # BUG FIX: the original used `=` (assignment) instead of `==` on every
  # field, which silently overwrote the receiver's fields with +other+'s
  # and returned the wrong result (truthy for unequal messages, falsy for
  # equal messages whose trailing fields were nil).
  def == other
    other.kind_of?(self.class) and to_a == other.to_a
  end
  alias eql? ==

  def hash
    @client_id.hash ^ @local_tick.hash ^ @global_tick.hash
  end
end
|
@@ -0,0 +1,42 @@
|
|
1
|
+
# Fiber-aware queue for deterministic single-threaded tests. Unlike
# Thread::Queue, popping an empty queue yields the enclosing fiber
# (value :block) instead of blocking a thread; popped outside any fiber
# it raises QueueEmptyError. Optionally yields on each push/pop so a
# test can observe queue traffic.
class MockQueue
  class QueueEmptyError < StandardError; end

  attr_reader :entries
  attr_reader :yield_on_push
  attr_reader :yield_on_pop

  def initialize yield_on_push: false, yield_on_pop: false
    @entries = []
    @yield_on_push = yield_on_push
    @yield_on_pop = yield_on_pop
  end

  def empty?
    entries.empty?
  end

  def push val
    @entries << val
    if yield_on_push
      # BUG FIX: the original `Fiber.yield(...) rescue FiberError` was a
      # rescue *modifier*: it rescued ANY StandardError and evaluated the
      # constant FiberError as the expression's value. Rescue only
      # FiberError (raised when yielding outside a fiber), as intended.
      begin
        Fiber.yield([:push, val])
      rescue FiberError
      end
    end
  end
  alias << push

  def pop
    begin
      while @entries.empty?
        Fiber.yield :block
      end
    rescue FiberError
      # Yielding outside a fiber: nothing can ever refill the queue.
      raise QueueEmptyError, "queue empty"
    end

    val = @entries.shift
    if yield_on_pop
      begin
        Fiber.yield([:pop, val])
      rescue FiberError
      end
    end
    val
  end
end
|
42
|
+
|
@@ -0,0 +1,50 @@
|
|
1
|
+
# In-memory stand-in for the funl sequencer: each submitted message is
# deep-copied, stamped with the next global tick, and appended to a
# shared history that streams read through private cursors.
class MockSequencer
  attr_reader :messages

  def initialize
    @messages = []
  end

  # Number of messages sequenced so far (== latest global tick).
  def tick
    messages.size
  end

  # Accept a message: deep-copy it (the caller's object is untouched),
  # assign the next global tick, clear delta, append to history.
  def << message
    stamped = Marshal.load(Marshal.dump(message))
    stamped.global_tick = tick + 1
    stamped.delta = nil
    @messages << stamped
  end

  def [](i)
    @messages[i]
  end

  # A fresh read cursor over the full history.
  def stream
    Stream.new(self)
  end

  # Client-side view of the sequencer: writes are forwarded, reads
  # advance this stream's own cursor through the shared history.
  class Stream
    include Enumerable

    def initialize seq
      @seq = seq
      @read_tick = 0
    end

    def << message
      @seq << message
    end
    alias write <<

    # Next unread message, or nil when caught up (cursor only advances
    # when a message was actually available).
    def read
      message = @seq.messages[@read_tick]
      @read_tick += 1 if message
      message
    end

    def each
      while (message = read)
        yield message
      end
    end
  end
end
|