tupelo 0.7 → 0.8

This diff compares the publicly released contents of the two package versions as they appear in their public registry and is provided for informational purposes only.
@@ -63,12 +63,7 @@ class Tupelo::Client
       @delta = 0
 
       @cmd_queue = client.make_queue
-      @tuplespace =
-        begin
-          client.tuplespace.new
-        rescue NoMethodError
-          client.tuplespace
-        end
+      @tuplespace = nil
       @message_class = client.message_class
       @blobber = nil
 
@@ -86,6 +81,16 @@ class Tupelo::Client
         @log.unknown *args
       end
     end
+
+    def tuplespace
+      @tuplespace ||= begin
+        if client.tuplespace.respond_to? :new
+          client.tuplespace.new
+        else
+          client.tuplespace
+        end
+      end
+    end
 
     def start
       return if @worker_thread
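
The eager begin/rescue is replaced by a memoized accessor: the worker now stores nil and builds the tuplespace on first use, treating anything that responds to new as a class to instantiate and anything else as an already-built instance. A small standalone illustration of that convention (resolve_tuplespace is a hypothetical helper, not part of tupelo; Hash is only a stand-in class):

    # A class is instantiated lazily; an already-built object passes through.
    def resolve_tuplespace(ts_option)
      ts_option.respond_to?(:new) ? ts_option.new : ts_option
    end

    resolve_tuplespace(Hash)      # => {}      (a class: instantiated)
    resolve_tuplespace({a: 1})    # => {a: 1}  (an instance: used as-is)
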
@@ -119,7 +124,6 @@ class Tupelo::Client
       worker_thread.join if worker_thread ## join(limit)?
       msg_reader_thread.kill if msg_reader_thread
       @atdo.stop if @atdo
-      ## optionally write final state (including global_tick) to disk
     end
 
     # stop without any remote handshaking
@@ -203,27 +207,14 @@ class Tupelo::Client
 
     def update_to_tick tick
       # at this point we know that the seq messages now accumulating in
-      # cmd_queue are tick+1, tick+2, .... In particular, if tick==0, we don't
-      # need to get state from archiver.
+      # cmd_queue are tick+1, tick+2, ....
       log.debug {"update_to_tick #{tick}"}
 
-      if tick == 0
-        @global_tick = 0
-        return
-      end
-
-      if false
-        ## load from file, update @global_tick, and see if equal to tick
-        if @global_tick == tick
-          return
-        elsif @global_tick > tick
-          raise "bad tick: #{@global_tick} > #{tick}"
-        end
-      end
-
       unless arc
-        log.warn "no archiver provided; assuming pubsub mode; " +
-          "some client ops (take and local read) will not work"
+        if tick > 0
+          log.warn "no archiver provided; assuming pubsub mode; " +
+            "some client ops (take and local read) will not work"
+        end
         @global_tick = tick
         log.info "global_tick = #{global_tick}"
         return
@@ -296,7 +287,8 @@ class Tupelo::Client
 
       if succeeded
         log.debug {"inserting #{op.writes}; deleting #{actual_tuples}"}
-        tuplespace.transaction inserts: op.writes, deletes: actual_tuples
+        tuplespace.transaction inserts: op.writes, deletes: actual_tuples,
+          tick: @global_tick
       end
 
       notify_waiters.each do |waiter|
@@ -0,0 +1,86 @@
+require 'sequel'
+
+class Tupelo::PersistentArchiver
+  class Tuplespace
+    include Enumerable
+
+    attr_reader :zero_tolerance
+
+    def initialize(file: ":memory:",
+                   zero_tolerance: Tupelo::Archiver::ZERO_TOLERANCE)
+      @db = Sequel.sqlite(:database => file)
+      @counts = Hash.new(0) ## in-memory counts until the sqlite path is done
+      @nzero = 0
+      @zero_tolerance = zero_tolerance
+    end
+
+    # note: multiple equal tuples are yielded once
+    def each
+      @db[:tuples].select(:packed, :count).each do |row| ## select as array?
+        packed, count = row.values_at(:packed, :count)
+        yield packed, count if count > 0
+      end
+    end
+
+    def insert packed
+      ## planned sqlite-backed implementation (not yet written):
+      ##   if packed has exact match in data table
+      ##     inc count
+      ##   else
+      ##     let hash = packed_hash(str)
+      ##     select rows with this hash
+      ##     if op is insert
+      ##       if rows.count == 0, insert new row, with count=1
+      ##       else find row using packed_compare(str, packed_tuple)
+      ##         if found, increment count
+      ##         else insert new row, with count=1
+      ## for now, count tuples in memory, as Tupelo::Archiver::Tuplespace does
+      @counts[packed] += 1
+    end
+
+    def delete_once tuple
+      if @counts[tuple] > 0
+        @counts[tuple] -= 1
+        if @counts[tuple] == 0
+          @nzero += 1
+          clear_excess_zeros if @nzero > zero_tolerance
+        end
+        true
+      else
+        false
+      end
+    end
+
+    # +tick+ is passed by the worker but not yet recorded here
+    def transaction inserts: [], deletes: [], tick: nil
+      deletes.each do |tuple|
+        delete_once tuple or raise "bug"
+      end
+
+      inserts.each do |tuple|
+        insert tuple.freeze ## freeze recursively
+      end
+    end
+
+    def clear_excess_zeros
+      nd = (@nzero - zero_tolerance / 2)
+      @counts.delete_if {|tuple, count| count == 0 && (nd-=1) >= 0}
+    end
+
+    def find_distinct_matches_for tuples
+      h = Hash.new(0)
+      tuples.map do |tuple|
+        if @counts[tuple] > h[tuple]
+          h[tuple] += 1
+          tuple
+        else
+          nil
+        end
+      end
+    end
+
+    def find_match_for tuple
+      @counts[tuple] > 0 && tuple
+    end
+  end
+end
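
This Tuplespace stores each distinct tuple once with a count rather than keeping duplicate copies, which is why each yields (tuple, count) pairs and delete_once only decrements. A minimal in-memory sketch of that counted-bag behavior (illustrative only; CountedBag and its methods are hypothetical names, not part of the gem):

    class CountedBag
      def initialize
        @counts = Hash.new(0)   # tuple => number of copies
      end

      def insert(tuple)
        @counts[tuple] += 1
      end

      # returns true if a copy was removed, false if none was present
      def delete_once(tuple)
        return false unless @counts[tuple] > 0
        @counts[tuple] -= 1
        true
      end

      # each distinct tuple is yielded once, with its current count
      def each
        @counts.each {|tuple, count| yield tuple, count if count > 0}
      end
    end

    bag = CountedBag.new
    2.times { bag.insert ["x", 1] }
    bag.each {|t, n| p [t, n]}   # => [["x", 1], 2]
    bag.delete_once ["x", 1]     # => true
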
@@ -0,0 +1,114 @@
+require 'funl/history-worker'
+
+class Tupelo::PersistentArchiver
+  class Worker < Tupelo::Client::Worker
+    include Funl::HistoryWorker
+
+    def initialize *args
+      super
+      @scheduled_actions = Hash.new {|h,k| h[k] = []}
+    end
+
+    def handle_client_request req
+      case req
+      when Tupelo::PersistentArchiver::ForkRequest
+        handle_fork_request req
+      else
+        super
+      end
+    end
+
+    def handle_fork_request req
+      stream = client.arc_server_stream_for req.io
+
+      begin
+        op, tags, tick = stream.read
+      rescue EOFError
+        log.debug {"#{stream.peer_name} disconnected from archiver"}
+        return
+      rescue => ex
+        log.error "in fork for #{stream || req.io}: #{ex.inspect}"
+      end
+
+      log.info {
+        "#{stream.peer_name} requested #{op.inspect} at tick=#{tick}" +
+        (tags ? " on #{tags}" : "")}
+
+      if tick <= global_tick
+        fork_for_op op, tags, tick, stream, req
+      else
+        at_tick tick do
+          fork_for_op op, tags, tick, stream, req
+        end
+      end
+    end
+
+    def fork_for_op op, tags, tick, stream, req
+      fork do
+        begin
+          case op
+          when "new client"
+            raise "Unimplemented" ###
+          when "get range" ### handle this in Funl::HistoryWorker
+            raise "Unimplemented" ###
+          when GET_TUPLESPACE
+            send_tuplespace stream, tags
+          else
+            raise "Unknown operation: #{op.inspect}"
+          end
+        rescue EOFError
+          log.debug {"#{stream.peer_name} disconnected from archiver"}
+        rescue => ex
+          log.error "in fork for #{stream || req.io}: #{ex.inspect}"
+        end
+      end
+    ensure
+      req.io.close
+    end
+
+    def at_tick tick, &action
+      @scheduled_actions[tick] << action
+    end
+
+    def handle_message msg
+      super
+      actions = @scheduled_actions.delete(global_tick)
+      actions and actions.each do |action|
+        action.call
+      end
+    end
+
+    def send_tuplespace stream, templates
+      log.info {
+        "send_tuplespace to #{stream.peer_name} " +
+        "at tick #{global_tick.inspect} " +
+        (templates ? " with templates #{templates.inspect}" : "")}
+
+      stream << [global_tick]
+
+      if templates
+        templates = templates.map {|t| Tupelo::Client::Template.new t}
+        tuplespace.each do |tuple, count|
+          if templates.any? {|template| template === tuple}
+            count.times do
+              stream << tuple
+              ## optimization: use stream.write_to_buffer
+            end
+          end
+          ## optimize this if templates have simple form, such as
+          ## [ [str1, nil, ...], [str2, nil, ...], ...]
+        end
+      else
+        tuplespace.each do |tuple, count|
+          count.times do ## just dump and send str * count?
+            stream << tuple ## optimize this, and cache the serial
+            ## optimization: use stream.write_to_buffer
+          end
+        end
+      end
+
+      stream << nil # terminator
+      ## stream.flush or close if write_to_buffer used above
+    end
+  end
+end
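
The at_tick / handle_message pair above is a small deferred-execution scheme: a request that asks for a tick the archiver has not reached yet is queued, and each handled message replays whatever was scheduled for the tick just reached. A stripped-down, self-contained sketch of the same pattern (TickScheduler and advance_to are hypothetical stand-ins for the worker's message handling):

    class TickScheduler
      def initialize
        @global_tick = 0
        @scheduled_actions = Hash.new {|h, k| h[k] = []}
      end

      # run the block now if we are already at or past +tick+,
      # otherwise remember it for later
      def at_tick(tick, &action)
        if tick <= @global_tick
          action.call
        else
          @scheduled_actions[tick] << action
        end
      end

      # stand-in for handle_message: advance one tick at a time and
      # fire anything that became due
      def advance_to(tick)
        while @global_tick < tick
          @global_tick += 1
          actions = @scheduled_actions.delete(@global_tick)
          actions and actions.each(&:call)
        end
      end
    end

    s = TickScheduler.new
    s.at_tick(3) { puts "ran at tick 3" }
    s.advance_to(5)   # prints once, when tick 3 is reached
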
@@ -0,0 +1,86 @@
+require 'tupelo/client'
+require 'funl/history-client'
+
+class Tupelo::PersistentArchiver < Tupelo::Client; end
+
+require 'tupelo/tuplets/persistent-archiver/worker'
+require 'tupelo/tuplets/persistent-archiver/tuplespace'
+
+module Tupelo
+  class PersistentArchiver
+    include Funl::HistoryClient
+
+    attr_reader :server
+    attr_reader :server_thread
+
+    # How many tuples with count=0 do we permit before cleaning up?
+    ZERO_TOLERANCE = 1000
+
+    def initialize server, **opts
+      super arc: nil, tuplespace: Tupelo::PersistentArchiver::Tuplespace, **opts
+      @server = server
+    end
+
+    # three kinds of requests:
+    #
+    # 1. fork a new client, with given Client class, and subselect
+    #    using given templates
+    #
+    # 2. accept tcp/unix socket connection and fork, and then:
+    #
+    #    a. dump subspace matching given templates OR
+    #
+    #    b. dump all ops in a given range of the global sequence
+    #       matching given templates
+    #
+    # the fork happens when tuplespace is consistent; we
+    # do this by passing cmd to worker thread, with conn
+    class ForkRequest
+      attr_reader :io
+      def initialize io
+        @io = io
+      end
+    end
+
+    def make_worker
+      Tupelo::PersistentArchiver::Worker.new self
+    end
+
+    def start
+      ## load from file?
+      super # start worker thread
+      @server_thread = Thread.new do
+        run
+      end
+    end
+
+    def stop
+      server_thread.kill if server_thread
+      super # stop worker thread
+    end
+
+    def run
+      loop do
+        ## nonblock_accept?
+        Thread.new(server.accept) do |conn|
+          handle_conn conn
+        end
+
+        ## periodically send worker request to dump space to file?
+      end
+    rescue => ex
+      log.error ex
+      raise
+    end
+
+    def handle_conn conn
+      log.debug {"accepted #{conn.inspect}"}
+      begin
+        worker << ForkRequest.new(conn)
+      rescue => ex
+        log.error ex
+        raise
+      end
+    end
+  end
+end
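
For context, the worker's handle_fork_request and send_tuplespace define a simple exchange over the connection that handle_conn hands off: the connecting process writes [op, tags, tick] and reads back [global_tick], a stream of tuples, and a nil terminator. A hypothetical client-side sketch of that exchange (the stream is assumed to behave like the funl object streams used above, i.e. read returns one deserialized object and << writes one; the op value is illustrative, the real constant is GET_TUPLESPACE whose value is not shown here):

    # Hypothetical reader for the dump protocol sketched by the worker above.
    def read_tuplespace(stream, templates: nil, tick: 0)
      stream << ["get tuplespace", templates, tick]  # op name is illustrative

      global_tick = stream.read.first   # worker sends [global_tick] first
      tuples = []
      while (tuple = stream.read)       # nil marks the end of the dump
        tuples << tuple
      end
      [global_tick, tuples]
    end
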
@@ -1,3 +1,3 @@
 module Tupelo
-  VERSION = "0.7"
+  VERSION = "0.8"
 end
@@ -1,3 +1,5 @@
+require 'fiber'
+
 require 'tupelo/client/reader'
 require 'tupelo/client/transaction'
 
@@ -16,23 +18,91 @@ class MockClient
   attr_accessor :arc
   attr_accessor :start_tick
 
-  def updater
-    @updater ||=
-      Fiber.new do
-        loop do
-          log.debug "update begin"
-          worker.update
-          log.debug "update end"
-          Fiber.yield
+  class IsBlocked < RuntimeError; end
+  class IsDone < RuntimeError; end
+
+  def update
+    worker.update
+  end
+
+  def make_queue
+    MockQueue.new
+  end
+
+  def will &block
+    (@will_do ||= []) << Fiber.new { instance_eval &block }
+    self
+  end
+
+  def step
+    loop do
+      fiber = @will_do[0] or raise IsDone, "nothing to do"
+
+      if fiber.alive?
+        update
+        val = fiber.resume
+        update
+        return val
+      end
+
+      @will_do.shift
+    end
+  end
+
+  def run limit: 100
+    loop do
+      fiber = @will_do[0] or raise IsDone, "nothing to do"
+
+      count = 0
+      while fiber.alive?
+        update
+        val = fiber.resume
+        update
+        if fiber.alive? or @will_do.size > 1
+          if val == :block
+            count += 1
+            if count > limit
+              raise IsBlocked, "exceeded blocking limit"
+            end
+          else
+            count = 0
+            yield val if block_given?
+          end
+        else
+          return val
        end
      end
+
+      @will_do.shift
+    end
  end
 
-  def update
-    updater.resume
+  def run_until_blocked limit: 100, &block
+    begin
+      run limit: limit, &block
+    rescue IsBlocked
+      return
+    end
+    raise IsDone, "run_until_blocked never blocked"
  end
 
-  def make_queue
-    MockQueue.new
+  def now limit: 100, &block
+    fiber = Fiber.new { instance_eval &block }
+    val = nil
+    count = 0
+    update
+    while fiber.alive?
+      val = fiber.resume
+      if val == :block
+        count += 1
+        if count > limit
+          raise IsBlocked, "cannot now do that -- exceeded blocking limit"
+        end
+      else
+        count = 0
+      end
+      update
+    end
+    val
  end
 end
@@ -28,8 +28,8 @@ class MockQueue
       while @entries.empty?
         Fiber.yield :block
       end
-    rescue FiberError
-      raise QueueEmptyError, "queue empty"
+    rescue FiberError => ex
+      raise QueueEmptyError, "queue empty (#{ex.message})"
     end
 
     val = @entries.shift
@@ -0,0 +1,103 @@
+require 'minitest/autorun'
+
+require 'mock-client.rb'
+
+class TestMockClient < Minitest::Test
+  class MockWorker
+    def update
+    end
+  end
+
+  def test_step
+    c = MockClient.new
+    c.worker = MockWorker.new
+
+    c.will do
+      3.times do |i|
+        Fiber.yield i
+      end
+      "done"
+    end
+
+    assert_equal 0, c.step
+    assert_equal 1, c.step
+    assert_equal 2, c.step
+    assert_equal "done", c.step
+    assert_raises MockClient::IsDone do
+      c.step
+    end
+  end
+
+  def test_run
+    c = MockClient.new
+    c.worker = MockWorker.new
+
+    c.will do
+      3.times do |i|
+        Fiber.yield i
+      end
+      "done"
+    end
+
+    a = []
+    r = c.run do |val|
+      a << val
+    end
+
+    assert_equal "done", r
+    assert_equal [0,1,2], a
+  end
+
+  def test_run_until_blocked
+    c = MockClient.new
+    c.worker = MockWorker.new
+
+    c.will do
+      3.times do |i|
+        Fiber.yield i
+      end
+      11.times do
+        Fiber.yield :block
+      end
+      "done"
+    end
+
+    a = []
+    c.run_until_blocked limit: 10 do |val|
+      a << val
+    end
+
+    assert_equal [0,1,2], a
+
+    r = c.run
+    assert_equal "done", r
+  end
+
+  def test_now
+    c = MockClient.new
+    c.worker = MockWorker.new
+
+    result = c.now do
+      3.times do
+        Fiber.yield :block
+      end
+      "done"
+    end
+
+    assert_equal "done", result
+  end
+
+  def test_now_limit
+    c = MockClient.new
+    c.worker = MockWorker.new
+
+    assert_raises MockClient::IsBlocked do
+      c.now limit: 2 do
+        3.times do
+          Fiber.yield :block
+        end
+        "done"
+      end
+    end
+  end
+end