tupelo 0.21 → 0.22
- checksums.yaml +4 -4
- data/README.md +171 -45
- data/bin/tup +51 -0
- data/example/counters/merge.rb +23 -3
- data/example/multi-tier/multi-sinatras.rb +5 -0
- data/example/riemann/event-subspace.rb +1 -4
- data/example/riemann/expiration-dbg.rb +2 -0
- data/example/riemann/producer.rb +4 -3
- data/example/riemann/v1/riemann.rb +2 -2
- data/example/riemann/v2/event-template.rb +71 -0
- data/example/riemann/v2/expirer.rb +1 -1
- data/example/riemann/v2/hash-store.rb +1 -0
- data/example/riemann/v2/ordered-event-store.rb +4 -1
- data/example/riemann/v2/riemann.rb +15 -8
- data/example/riemann/v2/sqlite-event-store.rb +117 -72
- data/example/sqlite/poi-store.rb +1 -1
- data/example/sqlite/poi-template.rb +2 -2
- data/example/sqlite/poi-v2.rb +2 -2
- data/example/subspaces/ramp.rb +9 -2
- data/example/tcp.rb +5 -0
- data/example/tiny-tcp-client.rb +15 -0
- data/example/tiny-tcp-service.rb +32 -0
- data/lib/tupelo/app.rb +4 -4
- data/lib/tupelo/app/builder.rb +2 -2
- data/lib/tupelo/app/irb-shell.rb +3 -3
- data/lib/tupelo/archiver.rb +0 -2
- data/lib/tupelo/archiver/tuplestore.rb +1 -1
- data/lib/tupelo/archiver/worker.rb +6 -6
- data/lib/tupelo/client.rb +2 -2
- data/lib/tupelo/client/reader.rb +3 -3
- data/lib/tupelo/client/scheduler.rb +1 -1
- data/lib/tupelo/client/subspace.rb +2 -2
- data/lib/tupelo/client/transaction.rb +28 -28
- data/lib/tupelo/client/tuplestore.rb +2 -2
- data/lib/tupelo/client/worker.rb +11 -10
- data/lib/tupelo/util/bin-circle.rb +8 -8
- data/lib/tupelo/util/boolean.rb +1 -1
- data/lib/tupelo/version.rb +1 -1
- data/test/lib/mock-client.rb +10 -10
- data/test/system/test-archiver.rb +2 -2
- data/test/unit/test-ops.rb +21 -21
- metadata +10 -20
- data/example/bingo/bingo-v2.rb +0 -20
- data/example/broker-queue.rb +0 -35
- data/example/child-of-child.rb +0 -34
- data/example/dataflow.rb +0 -21
- data/example/pregel/dist-opt.rb +0 -15
- data/example/riemann/v2/event-sql.rb +0 -56
- data/example/sqlite/tmp/poi-sqlite.rb +0 -35
- data/example/subspaces/addr-book-v1.rb +0 -104
- data/example/subspaces/addr-book-v2.rb +0 -16
- data/example/subspaces/sorted-set-space-OLD.rb +0 -130
- data/lib/tupelo/tuplets/persistent-archiver.rb +0 -86
- data/lib/tupelo/tuplets/persistent-archiver/tuplespace.rb +0 -91
- data/lib/tupelo/tuplets/persistent-archiver/worker.rb +0 -114
--- a/data/lib/tupelo/tuplets/persistent-archiver.rb
+++ /dev/null
@@ -1,86 +0,0 @@
-require 'tupelo/client'
-require 'funl/history-client'
-
-class Tupelo::PersistentArchiver < Tupelo::Client; end
-
-require 'tupelo/tuplets/persistent-archiver/worker'
-require 'tupelo/tuplets/persistent-archiver/tuplespace'
-
-module Tupelo
-  class PersistentArchiver
-    include Funl::HistoryClient
-
-    attr_reader :server
-    attr_reader :server_thread
-
-    # How many tuples with count=0 do we permit before cleaning up?
-    ZERO_TOLERANCE = 1000
-
-    def initialize server, **opts
-      super arc: nil, tuplespace: Tupelo::PersistentArchiver::Tuplespace, **opts
-      @server = server
-    end
-
-    # three kinds of requests:
-    #
-    # 1. fork a new client, with given Client class, and subselect
-    #    using given templates
-    #
-    # 2. accept tcp/unix socket connection and fork, and then:
-    #
-    #    a. dump subspace matching given templates OR
-    #
-    #    b. dump all ops in a given range of the global sequence
-    #       matching given templates
-    #
-    # the fork happens when tuplespace is consistent; we
-    # do this by passing cmd to worker thread, with conn
-    class ForkRequest
-      attr_reader :io
-      def initialize io
-        @io = io
-      end
-    end
-
-    def make_worker
-      Tupelo::PersistentArchiver::Worker.new self
-    end
-
-    def start
-      ## load from file?
-      super # start worker thread
-      @server_thread = Thread.new do
-        run
-      end
-    end
-
-    def stop
-      server_thread.kill if server_thread
-      super # stop worker thread
-    end
-
-    def run
-      loop do
-        ## nonblock_accept?
-        Thread.new(server.accept) do |conn|
-          handle_conn conn
-        end
-
-        ## periodically send worker request to dump space to file?
-      end
-    rescue => ex
-      log.error ex
-      raise
-    end
-
-    def handle_conn conn
-      log.debug {"accepted #{conn.inspect}"}
-      begin
-        worker << ForkRequest.new(conn)
-      rescue => ex
-        log.error ex
-        raise
-      end
-    end
-  end
-end
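
The file removed above accepted socket connections in a loop and handed each one, wrapped in a ForkRequest, to the worker thread, so the dump could happen at a point where the tuplespace is consistent. Below is a stdlib-only sketch of that accept-and-hand-off pattern; the class and method names are illustrative and are not part of tupelo's API.

require 'socket'

class MiniArchiver
  ForkRequest = Struct.new(:io)

  def initialize(server)
    @server = server       # e.g. a TCPServer or UNIXServer
    @requests = Queue.new  # hand-off channel to the worker thread
  end

  def start
    @worker_thread = Thread.new { work_loop }
    @accept_thread = Thread.new { accept_loop }
  end

  def stop
    @accept_thread.kill if @accept_thread
    @requests << :stop
    @worker_thread.join if @worker_thread
  end

  private

  def accept_loop
    loop do
      conn = @server.accept
      @requests << ForkRequest.new(conn)  # like: worker << ForkRequest.new(conn)
    end
  end

  def work_loop
    loop do
      req = @requests.pop   # blocks until a request arrives
      break if req == :stop
      handle req
    end
  end

  def handle(req)
    # Only one request is handled at a time, so state is consistent here;
    # the real archiver forked and streamed the tuplespace at this point.
    req.io.puts "hello from archiver"
  ensure
    req.io.close
  end
end

# Usage (illustrative):
#   MiniArchiver.new(TCPServer.new("127.0.0.1", 0)).tap(&:start)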
--- a/data/lib/tupelo/tuplets/persistent-archiver/tuplespace.rb
+++ /dev/null
@@ -1,91 +0,0 @@
-require 'sequel'
-
-class Tupelo::PersistentArchiver
-  class Tuplespace
-    include Enumerable
-
-    attr_reader :zero_tolerance
-
-    def initialize(file: ":memory:",
-                   zero_tolerance: Tupelo::Archiver::ZERO_TOLERANCE)
-      @db = Sequel.sqlite(:database => file)
-      @nzero = 0
-      @zero_tolerance = zero_tolerance
-    end
-
-    # note: multiple equal tuples are yielded once
-    def each
-      @db[:tuples].select(:packed, :count).each do |row| ## select as array?
-        packed, count = row.values_at(:packed, :count)
-        yield packed, count if count > 0
-      end
-    end
-
-    def insert packed
-      if packed has exact match in data table
-        inc count
-      else
-        let hash = packed_hash(str)
-        select rows with this hash
-
-        if op is insert
-          if rows.count == 0, insert new row, with count=1
-          else find row using packed_compare(str, packed_tuple)
-            if found, increment count
-            else insert new row, with count=1
-
-
-      @db[:tuples].insert
-      @counts[tuple] += 1
-    end
-
-    def delete_once tuple
-      if @counts[tuple] > 0
-        @counts[tuple] -= 1
-        if @counts[tuple] == 0
-          @nzero += 1
-          clear_excess_zeros if @nzero > zero_tolerance
-        end
-        true
-      else
-        false
-      end
-    end
-
-    def transaction inserts: [], deletes: []
-      deletes.each do |tuple|
-        delete_once tuple or raise "bug"
-      end
-
-      inserts.each do |tuple|
-        insert tuple.freeze ## freeze recursively
-      end
-    end
-
-    def clear_excess_zeros
-      nd = (@nzero - zero_tolerance / 2)
-      @counts.delete_if {|tuple, count| count == 0 && (nd-=1) >= 0}
-    end
-
-    def find_distinct_matches_for tuples
-      h = Hash.new(0)
-      tuples.map do |tuple|
-        if @counts[tuple] > h[tuple]
-          h[tuple] += 1
-          tuple
-        else
-          nil
-        end
-      end
-    end
-
-    def find_match_for tuple
-      @counts[tuple] > 0 && tuple
-    end
-
-    ### def find_match_for tuple, distinct_from: []
-    ###   tuple && @tuple_rec[tuple].count > distinct_from.count(tuple)
-    ###   ## is 'tuple &&' necessary?
-    ### end
-  end
-end
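
The insert method in the removed file was still pseudocode: look for an exact match of the packed tuple and bump its count, otherwise insert a new row with count=1. Below is a minimal Sequel/SQLite sketch of that count-based multiset, assuming a simple tuples(packed, count) table; the class name and schema are illustrative only (not what tupelo 0.22 actually uses), and the hashing and zero-row cleanup of the original are omitted.

require 'sequel'  # needs the sqlite3 gem for the sqlite adapter

class MiniTupleStore
  def initialize(file: ":memory:")
    @db = Sequel.sqlite(database: file)
    @db.create_table? :tuples do
      String  :packed, null: false, unique: true  # serialized tuple
      Integer :count,  null: false
    end
    @tuples = @db[:tuples]
  end

  # Add one instance of a packed tuple: bump the count if this exact
  # serialization is already stored, otherwise insert a new row.
  def insert(packed)
    updated = @tuples.where(packed: packed)
                     .update(count: Sequel.expr(:count) + 1)
    @tuples.insert(packed: packed, count: 1) if updated == 0
  end

  # Remove one instance; returns false if no live copy is present.
  def delete_once(packed)
    @tuples.where(packed: packed)
           .where(Sequel.expr(:count) > 0)
           .update(count: Sequel.expr(:count) - 1) > 0
  end

  # Yield each distinct packed tuple with its current count.
  def each
    @tuples.where(Sequel.expr(:count) > 0).each do |row|
      yield row[:packed], row[:count]
    end
  end
end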
--- a/data/lib/tupelo/tuplets/persistent-archiver/worker.rb
+++ /dev/null
@@ -1,114 +0,0 @@
-require 'funl/history-worker'
-
-class Tupelo::PersistentArchiver
-  class Worker < Tupelo::Client::Worker
-    include Funl::HistoryWorker
-
-    def initialize *args
-      super
-      @scheduled_actions = Hash.new {|h,k| h[k] = []}
-    end
-
-    def handle_client_request req
-      case req
-      when Tupelo::Archiver::ForkRequest
-        handle_fork_request req
-      else
-        super
-      end
-    end
-
-    def handle_fork_request req
-      stream = client.arc_server_stream_for req.io
-
-      begin
-        op, tags, tick = stream.read
-      rescue EOFError
-        log.debug {"#{stream.peer_name} disconnected from archiver"}
-        return
-      rescue => ex
-        log.error "in fork for #{stream || req.io}: #{ex.inspect}"
-      end
-
-      log.info {
-        "#{stream.peer_name} requested #{op.inspect} at tick=#{tick}" +
-        (tags ? " on #{tags}" : "")}
-
-      if tick <= global_tick
-        fork_for_op op, tags, tick, stream, req
-      else
-        at_tick tick do
-          fork_for_op op, tags, tick, stream, req
-        end
-      end
-    end
-
-    def fork_for_op op, tags, tick, stream, req
-      fork do
-        begin
-          case op
-          when "new client"
-            raise "Unimplemented" ###
-          when "get range" ### handle this in Funl::HistoryWorker
-            raise "Unimplemented" ###
-          when GET_TUPLESPACE
-            send_tuplespace stream, tags
-          else
-            raise "Unknown operation: #{op.inspect}"
-          end
-        rescue EOFError
-          log.debug {"#{stream.peer_name} disconnected from archiver"}
-        rescue => ex
-          log.error "in fork for #{stream || req.io}: #{ex.inspect}"
-        end
-      end
-    ensure
-      req.io.close
-    end
-
-    def at_tick tick, &action
-      @scheduled_actions[tick] << action
-    end
-
-    def handle_message msg
-      super
-      actions = @scheduled_actions.delete(global_tick)
-      actions and actions.each do |action|
-        action.call
-      end
-    end
-
-    def send_tuplespace stream, templates
-      log.info {
-        "send_tuplespace to #{stream.peer_name} " +
-        "at tick #{global_tick.inspect} " +
-        (templates ? " with templates #{templates.inspect}" : "")}
-
-      stream << [global_tick]
-
-      if templates
-        templates = templates.map {|t| Tupelo::Client::Template.new t}
-        tuplespace.each do |tuple, count|
-          if templates.any? {|template| template === tuple}
-            count.times do
-              stream << tuple
-              ## optimization: use stream.write_to_buffer
-            end
-          end
-          ## optimize this if templates have simple form, such as
-          ## [ [str1, nil, ...], [str2, nil, ...], ...]
-        end
-      else
-        tuplespace.each do |tuple, count|
-          count.times do ## just dump and send str * count?
-            stream << tuple ## optimize this, and cache the serial
-            ## optimization: use stream.write_to_buffer
-          end
-        end
-      end
-
-      stream << nil # terminator
-      ## stream.flush or close if write_to_buffer used above
-    end
-  end
-end
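
The removed worker deferred fork_for_op until the requested tick was reached: at_tick queues the action, and handle_message fires queued actions as the global tick advances. Below is a stripped-down, stdlib-only sketch of that scheduling pattern; the names are illustrative, not tupelo's API.

class TickScheduler
  def initialize
    @actions = Hash.new { |h, k| h[k] = [] }  # tick => queued actions
    @tick = 0
  end

  # Run the block now if the tick has already been reached,
  # otherwise queue it for later (compare at_tick above).
  def at_tick(tick, &action)
    if tick <= @tick
      action.call
    else
      @actions[tick] << action
    end
  end

  # Call once per processed message; like the original handle_message,
  # it assumes the tick advances one step at a time.
  def advance_to(tick)
    @tick = tick
    (@actions.delete(tick) || []).each(&:call)
  end
end

# Usage:
#   s = TickScheduler.new
#   s.at_tick(3) { puts "consistent at tick 3" }
#   1.upto(3) { |t| s.advance_to(t) }   # prints once tick 3 is reached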