omf_oml 0.9.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +2 -0
- data/Gemfile +4 -0
- data/Rakefile +10 -0
- data/lib/omf_oml/endpoint.rb +170 -0
- data/lib/omf_oml/indexed_table.rb +61 -0
- data/lib/omf_oml/network.rb +467 -0
- data/lib/omf_oml/oml_tuple.rb +64 -0
- data/lib/omf_oml/schema.rb +200 -0
- data/lib/omf_oml/sequel/sequel_server.rb +412 -0
- data/lib/omf_oml/sql_row.rb +302 -0
- data/lib/omf_oml/sql_source.rb +131 -0
- data/lib/omf_oml/table.rb +227 -0
- data/lib/omf_oml/tuple.rb +110 -0
- data/lib/omf_oml/version.rb +6 -0
- data/lib/omf_oml.rb +4 -0
- data/omf_oml.gemspec +26 -0
- metadata +73 -0
@@ -0,0 +1,302 @@

require 'omf_oml/tuple'

module OMF::OML


  # Reads the content of a table and feeds it out as a tuple store.
  # After creation of the object, the actual tuple feed is started
  # with a call to +run+.
  #
  class OmlSqlRow < OmlTuple

    # *opts:
    #   - offset: Ignore first +offset+ rows. If negative or zero serve +offset+ rows initially
    #   - limit: Number of rows to fetch each time [1000]
    #   - check_interval: Interval in seconds when to check for new data. If 0, only run once.
    #
    def initialize(table_name, db_file, source, opts = {})
      @sname = table_name
      @db_file = db_file
      @source = source

      unless @offset = opts[:offset]
        @offset = 0
      end
      @limit = opts[:limit]
      @limit = 1000 unless @limit

      @check_interval = opts[:check_interval]
      @check_interval = 0 unless @check_interval

      @on_new_vector_proc = {}

      schema = find_schema
      super table_name, schema
    end

    # Return a specific element of the vector identified either
    # by its name or its column index.
    #
    def [](name_or_index)
      @vprocs[name_or_index].call(@raw)
    end

    # Return the elements of the vector as an array.
    def to_a(include_oml_internals = false)
      include_oml_internals ? @row.dup : @row[4 .. -1]
    end

    # Return an array including the values for the named elements
    # given as parameters.
    #
    def select(*col_names)
      r = @row
      col_names.collect do |n|
        p = @vprocs[n]
        #puts "#{n}::#{p}"
        p ? p.call(r) : nil
      end
    end

    def ts
      self[:oml_ts_server]
    end

    def seq_no
      self[:oml_seq]
    end

    # Register a proc to be called when a new tuple arrives
    # on this stream.
    #
    def on_new_tuple(key = :_, &proc)
      if proc
        @on_new_vector_proc[key] = proc
      else
        @on_new_vector_proc.delete key
      end
      run() unless @on_new_vector_proc.empty?
    end

    # Create and return an +OmlTable+ which captures this tuple stream.
    #
    # The arguments to this method are either a list of columns to
    # capture in the table, an array of column names plus an option
    # hash, or just the option hash to be provided to the +OmlTable+
    # constructor.
    #
    # If a block is provided, any arriving tuple is passed to the block,
    # which is expected to return an array which is added to the table,
    # or nil in which case nothing is added. If a selector array is given the
    # block is called with an array of values in the order of the columns
    # listed in the selector. Otherwise, the block is called directly
    # with the tuple.
    #
    # opts:
    #   :schema - use this schema instead for the table
    #   :name - name to use for table
    #   .... - remaining options to be passed to the table constructor
    #
    def capture_in_table(*args, &block)
      if args.length == 1
        if args[0].kind_of?(Array)
          select = args[0]
        elsif args[0].kind_of?(Hash)
          opts = args[0]
        end
      elsif args.length == 2 && args[1].kind_of?(Hash)
        select = args[0]
        opts = args[1]
      else
        opts = {}
        select = args
      end

      if (tschema = opts.delete(:schema))
        # unless tschema[0].kind_of? Hash
        #   tschema = tschema.collect do |cname| {:name => cname} end
        # end
      else
        tschema = select.collect do |cname| {:name => cname} end
      end
      tname = opts.delete(:name) || stream_name
      t = OMF::OML::OmlTable.new(tname, tschema, opts)
      if block
        self.on_new_tuple() do |v|
          #puts "New vector(#{tname}): #{v.schema.inspect} ---- #{v.select(*select).size} <#{v.select(*select).join('|')}>"
          if select
            row = block.call(v.select(*select))
          else
            row = block.call(v)
          end
          if row
            raise "Expected kind of Array, but got '#{row.inspect}'" unless row.kind_of?(Array)
            t.add_row(row)
          end
        end
      else
        self.on_new_tuple() do |v|
          #puts "New vector(#{tname}): #{v.select(*select).join('|')}"
          t.add_row(v.select(*select))
        end
      end
      t
    end

    def to_table(name = nil, opts = {})
      unless name
        name = @sname
      end
      t = OMF::OML::OmlTable.new(name, self.schema)
      include_oml_internals = opts[:include_oml_internals] || true
      self.on_new_tuple() do |v|
        r = v.to_a(include_oml_internals)
        t.add_row(r)
      end
      t
    end


    protected

    def find_schema()
      stmt = _statement
      cnames = stmt.columns
      ctypes = stmt.types
      schema = []
      #schema << {:name => :oml_sender, :type => 'STRING'}
      cnames.size.times do |i|
        name = cnames[i].to_sym
        schema << {:name => name, :type => ctypes[i]}
      end
      # Rename first col
      first = schema[0]
      raise "BUG: Should be 'name'" if first[:name] != :name
      first[:name] = :oml_sender

      OmlSchema.new(schema)
    end

    # override
    def process_schema(schema)
      i = 0
      @vprocs = {}
      schema.each_column do |col|
        name = col[:name]
        j = i + 0
        l = @vprocs[name] = lambda do |r| r[j] end
        @vprocs[i - 4] = l if i > 4
        i += 1
      end
    end

    def run(in_thread = true)
      return if @running
      if in_thread
        if @db
          # force opening of database in new thread
          begin
            @db.close
          rescue Exception
            # ALERT: issues with finalising statements, don't know how to deal with it
          end
          @db = nil
          @stmt = nil
        end
        Thread.new do
          begin
            _run
          rescue Exception => ex
            error "Exception in OmlSqlRow: #{ex}"
            debug "Exception in OmlSqlRow: #{ex.backtrace.join("\n\t")}"
          end
        end
      else
        _run
      end
    end

    private

    def _run
      if @check_interval <= 0
        _run_once
      else
        @running = true
        while (@running)
          begin
            unless _run_once
              # All rows read, wait a bit for news to show up
              sleep @check_interval
            end
          rescue Exception => ex
            warn ex
            debug "\t", ex.backtrace.join("\n\t")
          end
        end
      end
    end

    # Run a query on the database and serve all rows found, one at a time.
    # Return true if there might be more rows in the database.
    def _run_once
      row_cnt = 0
      _statement.execute(@limit, @offset).each do |r|
        @row = r
        @on_new_vector_proc.each_value do |proc|
          proc.call(self)
        end
        row_cnt += 1
      end
      @offset += row_cnt
      debug "Read #{row_cnt}/#{@offset} rows from '#{@sname}'"
      row_cnt >= @limit # there could be more to read
    end

    def _statement
      unless @stmt
        db = @db = SQLite3::Database.new(@db_file)
        @db.type_translation = true
        table_name = t = @sname
        if @offset < 0
          cnt = db.execute("select count(*) from #{table_name};")[0][0].to_i
          #debug "CNT: #{cnt}.#{cnt.class} offset: #{@offset}"
          @offset = cnt + @offset # @offset was negative here
          debug("Initial offset #{@offset} in '#{table_name}' with #{cnt} rows")
          @offset = 0 if @offset < 0
        end
        #@stmt = db.prepare("SELECT * FROM #{table_name} LIMIT ? OFFSET ?;")
        @stmt = db.prepare("SELECT _senders.name, #{t}.* FROM #{t} JOIN _senders WHERE #{t}.oml_sender_id = _senders.id LIMIT ? OFFSET ?;")
      end
      @stmt
    end
  end # OmlSqlRow

end

if $0 == __FILE__

  require 'omf_oml/table'
  ep = OMF::OML::OmlSqlSource.new('brooklynDemo.sq3')
  ep.on_new_stream() do |s|
    puts ">>>>>>>>>>>> New stream #{s.stream_name}: #{s.names.join(', ')}"
    case s.stream_name
    when 'wimaxmonitor_wimaxstatus'
      select = [:oml_ts_server, :sender_hostname, :frequency, :signal, :rssi, :cinr, :avg_tx_pw]
    when 'GPSlogger_gps_data'
      select = [:oml_ts_server, :oml_sender_id, :lat, :lon]
    end

    s.on_new_vector() do |v|
      puts "New vector(#{s.stream_name}): #{v.select(*select).join('|')}"
    end
  end
  ep.run()

end
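As a rough usage sketch (not part of the gem itself): the +capture_in_table+ call documented above can be driven from an +on_new_stream+ handler much like the demo block just above. The table name 'gps_positions' and the block body are illustrative only; the selector-array-plus-options calling form and the Array-or-nil block contract come from the method's comment.

    ep = OMF::OML::OmlSqlSource.new('brooklynDemo.sq3')
    ep.on_new_stream do |s|
      next unless s.stream_name == 'GPSlogger_gps_data'
      # Selector array + option hash: the block receives the selected values in
      # order and must return an Array (added to the table) or nil (row skipped).
      gps = s.capture_in_table([:oml_ts_server, :lat, :lon], :name => 'gps_positions') do |ts, lat, lon|
        (lat && lon) ? [ts, lat.to_f, lon.to_f] : nil
      end
    end
    ep.run()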
@@ -0,0 +1,131 @@

require 'sqlite3'

require 'omf_common/lobject'
require 'omf_oml/endpoint'
require 'omf_oml/tuple'
require 'omf_oml/sql_row'

module OMF::OML

  # This class fetches the content of an sqlite3 database and serves it as multiple
  # OML streams.
  #
  # After creating the object, the @run@ method needs to be called to
  # start producing the streams.
  #
  class OmlSqlSource < OMF::Common::LObject

    # +opts+ - passed on as +opts+ to the OmlSqlRow constructor
    #
    def initialize(db_file, opts = {})
      raise "Can't find database '#{db_file}'" unless File.readable?(db_file)
      @db_file = db_file
      @running = false
      @on_new_stream_procs = {}
      @tables = {}
      @table_opts = opts
    end

    # Register a proc to be called when a new stream is
    # discovered on this endpoint.
    #
    def on_new_stream(key = :_, &proc)
      if proc
        @on_new_stream_procs[key] = proc
      else
        @on_new_stream_procs.delete key
      end
    end

    # def report_new_stream(stream)
    #   @on_new_stream_procs.each_value do |proc|
    #     proc.call(stream)
    #   end
    # end

    # Start checking the database for tables and create a new stream
    # by calling the internal +report_new_table+ method.
    # If +check_every+ > 0, continue checking every +check_every+ seconds
    # for new tables in the database; otherwise the database is only checked once.
    #
    def run(check_every = -1)
      if check_every <= 0
        run_once()
      else
        Thread.new do
          @running = true
          while (@running)
            begin
              run_once()
            rescue Exception => ex
              error "Exception in OmlSqlSource#run: #{ex}"
              debug "Exception in OmlSqlSource#run: #{ex.backtrace.join("\n\t")}"
            end
            sleep check_every
          end
        end
      end
    end

    def run_once()
      unless @db
        @db = SQLite3::Database.new(@db_file)
        @db.type_translation = true
      end

      # first find tables
      @db.execute("SELECT * FROM sqlite_master WHERE type='table';") do |r|
        table_name = r[1]
        report_new_table(table_name, @table_opts) unless table_name.start_with?('_')
      end
    end


    protected

    # This method is called for every table detected in the database.
    # It creates a new +OmlSqlRow+ object, passing +opts+ through to its
    # constructor. The table is then streamed as a tuple stream.
    # After the stream has been created, each block registered with
    # +on_new_stream+ is then called with the new stream as its single
    # argument.
    #
    def report_new_table(table_name, opts = {})
      return if @tables.key?(table_name) # check if already reported before
      debug "Found table: #{table_name}"
      t = @tables[table_name] = OmlSqlRow.new(table_name, @db_file, self, opts)
      @on_new_stream_procs.each_value do |proc|
        proc.call(t)
      end
    end

  end


end

if $0 == __FILE__

  require 'omf_oml/table'
  ep = OMF::OML::OmlSqlSource.new('brooklynDemo.sq3')
  ep.on_new_stream() do |s|
    puts ">>>>>>>>>>>> New stream #{s.stream_name}: #{s.names.join(', ')}"
    case s.stream_name
    when 'wimaxmonitor_wimaxstatus'
      select = [:oml_ts_server, :sender_hostname, :frequency, :signal, :rssi, :cinr, :avg_tx_pw]
    when 'GPSlogger_gps_data'
      select = [:oml_ts_server, :oml_sender_id, :lat, :lon]
    end

    s.on_new_vector() do |v|
      puts "New vector(#{s.stream_name}): #{v.select(*select).join('|')}"
    end
  end
  ep.run()

end
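A sketch of combining OmlSqlSource with OmlSqlRow#to_table (shown earlier in this diff) to mirror every measurement table of a database into an in-memory OmlTable; the database file name and the :check_interval value are placeholders, not values from the gem.

    require 'omf_oml/sql_source'
    require 'omf_oml/table'

    # :check_interval is handed through to each OmlSqlRow (0 means read only once).
    src = OMF::OML::OmlSqlSource.new('experiment.sq3', :check_interval => 5)

    tables = {}
    src.on_new_stream do |stream|
      t = stream.to_table          # OmlTable fed by this tuple stream
      tables[t.name] = t
    end
    src.run()                      # scan for tables once; run(30) would keep polling in a thread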
@@ -0,0 +1,227 @@

require 'monitor'

require 'omf_common/lobject'
require 'omf_oml'
require 'omf_oml/schema'


module OMF::OML

  # This class represents a database-like table holding a sequence of OML measurements (rows)
  # according to a common schema.
  #
  class OmlTable < OMF::Common::LObject

    def self.create(tname, schema, opts = {}, &on_before_row_added)
      if (index = opts.delete(:index))
        require 'omf_oml/indexed_table'
        OmlIndexedTable.new(tname, index, schema, &on_before_row_added)
      else
        OmlTable.new(tname, schema, opts, &on_before_row_added)
      end
    end

    include MonitorMixin

    attr_reader :name
    attr_accessor :max_size
    attr_reader :schema
    attr_reader :offset

    #
    # tname - Name of table
    # schema - OmlSchema or Array containing [name, type*] for every column in table.
    #     The table adds a '__id__' column at the beginning which keeps track of the row's unique id.
    # opts -
    #   :max_size - keep table to that size by dropping older rows
    #   :index - only keep the latest inserted row for a unique col value - messes with row order
    #
    def initialize(tname, schema, opts = {}, &on_before_row_added)
      super tname

      #@endpoint = endpoint
      @name = tname
      @schema = OmlSchema.create(schema)
      unless @schema.name_at(0) == :__id__
        @schema.insert_column_at(0, [:__id__, 'int'])
      end
      @opts = opts
      if (index = opts[:index])
        throw "No longer supported, use IndexedTable instead"
        # @indexed_rows = {}
        # @index_col = @schema.index_for_col(index)
      end
      @on_before_row_added = on_before_row_added
      @offset = 0 # number of rows skipped before the first one recorded here
      @rows = []
      @row_id = 0 # Each new row is assigned an id
      @max_size = opts[:max_size]
      @on_content_changed = {}
    end

    def rows
      #@indexed_rows ? @indexed_rows.values : @rows
      @rows
    end

    # Register +callback+ to be called to process any newly
    # offered row before it is added to internal storage.
    # The callback's argument is the new row (TODO: in what form)
    # and should return what is being added instead of the original
    # row. If the +callback+ returns nil, nothing is added.
    #
    def on_before_row_added(&callback)
      @on_before_row_added = callback
    end

    # Register a callback for when the content of the table changes. The key
    # allows for the callback to be removed by calling this method
    # without a block. If the
    # optional 'offset' value is set to zero or a positive value,
    # then the currently stored values starting at this index are
    # immediately sent to 'proc'. The 'proc' is expected to receive two
    # parameters, an 'action' and the content changed. The 'action' is either
    # ':added' or ':removed' and the content is an array of rows.
    #
    def on_content_changed(key, offset = -1, &proc)
      #puts ">>>>>>> #{offset}"
      if proc
        @on_content_changed[key] = proc
        if offset >= 0
          #with_offset = proc.arity == 2
          proc.call(:added, @rows[offset .. -1])
          #.each_with_index do |r, i|
          #   with_offset ? proc.call(r, offset + i) : proc.call(r)
          # end
        end
      else
        @on_content_changed.delete key
      end
    end

    # NOTE: +on_row_added+ callbacks are done within the monitor.
    #
    def add_row(row, needs_casting = false)
      synchronize do
        if row = _add_row(row, needs_casting)
          _notify_content_changed(:added, [row])
        end
      end
    end

    # Return a new table which shadows this table but only contains
    # rows with unique values in the column 'col_name' and of these the
    # latest added rows to this table.
    #
    # col_name - Name of column to use for indexing
    #
    def indexed_by(col_name)
      require 'omf_oml/indexed_table'
      OmlIndexedTable.shadow(self, col_name)
    end

    # Add an array of rows to this table
    #
    def add_rows(rows, needs_casting = false)
      synchronize do
        added = rows.map { |row| _add_row(row, needs_casting) }
        added = added.compact
        unless added.empty?
          _notify_content_changed(:added, added)
        end
      end
    end

    # Return a new table which only contains the rows of this
    # table whose value in column 'col_name' is equal to 'col_value'
    #
    def create_sliced_table(col_name, col_value, table_opts = {})
      sname = "#{@name}_slice_#{Kernel.rand}"

      st = self.class.new(name, @schema, table_opts)
      st.instance_variable_set(:@sname, sname)
      st.instance_variable_set(:@master_ds, self)
      def st.release
        @master_ds.on_content_changed(@sname) # release callback
      end

      index = @schema.index_for_col(col_name)
      on_content_changed(sname, 0) do |action, rows|
        if action == :removed
          warn "No support for removing rows from sliced table '#{sname}'."
          next
        end
        rows.each do |row|
          if row[index] == col_value
            row = row[1 .. -1] # remove the row_id
            debug "Add row '#{row.inspect}'"
            st.add_row(row)
          end
        end
      end
      debug "Created sliced table from '#{@name}' (rows: #{st.rows.length}-#{@rows.length})"
      st
    end

    def describe()
      rows
    end

    def data_sources
      self
    end

    private

    # NOT synchronized
    #
    def _add_row(row, needs_casting = false)
      if needs_casting
        row = @schema.cast_row(row)
      end
      #puts row.inspect
      if @on_before_row_added
        row = @on_before_row_added.call(row)
      end
      return nil unless row

      row.insert(0, @row_id += 1)
      _add_row_finally(row)
    end

    # Finally add 'row' to internal storage. This would be the method to
    # override in subclasses, as it is thread safe and all other pre-storage
    # tests have been performed. Should return the row added, or nil if nothing
    # was ultimately added.
    #
    def _add_row_finally(row)
      # if @indexed_rows
      #   @indexed_rows[row[@index_col]] = row
      #   return
      # end

      @rows << row
      if @max_size && @max_size > 0 && (s = @rows.size) > @max_size
        if (removed_row = @rows.shift) # not necessarily fool proof, but fast
          _notify_content_changed(:removed, [removed_row])
        end
        @offset = @offset + 1
      end
      row
    end

    def _notify_content_changed(action, rows)
      @on_content_changed.each_value do |proc|
        #puts "call: #{proc.inspect}"
        #if proc.arity == 1
          proc.call(action, rows)
        #else
        #  proc.call(row, @offset)
        #end
      end
    end

  end # OmlTable

end
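For completeness, a minimal sketch of using OmlTable directly, outside of the SQL streaming path. The table name, column names, and values are made up, and the name-only schema hash form mirrors how +capture_in_table+ builds its schema earlier in this diff.

    require 'omf_oml/table'

    schema = [{:name => :node}, {:name => :rssi}]
    table  = OMF::OML::OmlTable.new('link_stats', schema, :max_size => 1000)

    # Callback receives the action (:added or :removed) and an array of rows.
    table.on_content_changed(:logger) do |action, rows|
      puts "#{action}: #{rows.length} row(s)"
    end

    table.add_row(['node1', -57.0])
    table.add_rows([['node2', -63.5], ['node3', -71.2]])

    latest = table.indexed_by(:node)   # shadow table keeping only the newest row per :node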