odba 1.0.0

@@ -0,0 +1,72 @@
+ #!/usr/bin/env ruby
+ #-- ODBA -- odba -- 13.05.2004 -- hwyss@ywesee.com rwaltert@ywesee.com mwalder@ywesee.com
+ # Copyright (C) 2004 Hannes Wyss
+ #
+ # This library is free software; you can redistribute it and/or
+ # modify it under the terms of the GNU Lesser General Public
+ # License as published by the Free Software Foundation; either
+ # version 2.1 of the License, or (at your option) any later version.
+ #
+ # This library is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ # Lesser General Public License for more details.
+ #
+ # You should have received a copy of the GNU Lesser General Public
+ # License along with this library; if not, write to the Free Software
+ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ #
+ # ywesee - intellectual capital connected, Winterthurerstrasse 52, CH-8006 Zürich, Switzerland
+ # hwyss@ywesee.com
+ #++
+ # = ODBA - Object DataBase Access
+ #
+ # ODBA is an unintrusive Object Cache system. It addresses the cross-cutting
+ # concern of object storage by disconnecting and serializing objects into
+ # storage. Connections between disconnected objects are replaced by instances
+ # of ODBA::Stub, thus enabling transparent object-loading.
+ #
+ # ODBA supports:
+ # * transparent loading of connected objects
+ # * index-vectors
+ # * transactions
+ # * transparent fetching of Hash elements without loading the entire Hash
+ #
+ # == Example
+ #   require 'odba'
+ #
+ #   # connect the default storage manager to a relational database
+ #   ODBA.storage.dbi = ODBA::ConnectionPool.new('DBI:Pg:database', 'user', 'pw')
+ #
+ #   class Counter
+ #     include ODBA::Persistable
+ #     def initialize
+ #       @pos = 0
+ #     end
+ #     def up
+ #       @pos += 1
+ #       self.odba_store
+ #       @pos
+ #     end
+ #     def down
+ #       @pos -= 1
+ #       self.odba_store
+ #       @pos
+ #     end
+ #   end
+ #
+ # :main:lib/odba.rb
+
+ require 'odba/persistable'
+ require 'odba/storage'
+ require 'odba/cache'
+ require 'odba/stub'
+ require 'odba/marshal'
+ require 'odba/cache_entry'
+ require 'odba/odba_error'
+ require 'odba/index'
+ require 'odba/odba'
+
+ class Odba
+   VERSION = '1.0.0'
+ end
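
The RDoc example above defines Counter but stops short of a client session. The following is a hedged sketch, not part of the gem: it assumes a configured DBI backend and the ODBA.cache accessor provided by odba/odba.rb; the name 'page_hits' is purely illustrative. Persistables save themselves with odba_store, and references to other Persistables come back as ODBA::Stub instances that load on first access.

    require 'odba'
    # configure the default storage manager as in the RDoc example above
    ODBA.storage.dbi = ODBA::ConnectionPool.new('DBI:Pg:database', 'user', 'pw')

    counter = Counter.new
    counter.odba_store('page_hits')   # persist under a name
    counter.up                        # => 1, persisted again via odba_store

    # a later session can retrieve the same object by name; the block
    # supplies a default if nothing is stored under that name yet
    counter = ODBA.cache.fetch_named('page_hits', nil) { Counter.new }
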
@@ -0,0 +1,71 @@
+ #!/usr/bin/env ruby
+
+ require 'date'
+ require 'strscan'
+
+ if RUBY_VERSION >= '1.9'
+   def u str
+     str
+   end
+   class Date
+     def self._load(str)
+       scn = StringScanner.new str
+       a = []
+       while match = scn.get_byte
+         case match
+         when ":"
+           len = scn.get_byte
+           name = scn.scan(/.{#{Marshal.load("\x04\bi#{len}")}}/)
+         when "i"
+           int = scn.get_byte
+           size, = int.unpack('c')
+           if size > 1 && size < 5
+             size.times do
+               int << scn.get_byte
+             end
+           end
+           dump = "\x04\bi" << int
+           a.push Marshal.load(dump)
+         end
+       end
+
+       ajd = of = sg = 0
+       if a.size == 3
+         num, den, sg = a
+         ajd = Rational(num, den)
+         ajd -= 1.to_r/2
+       else
+         num, den, of, sg = a
+         ajd = Rational(num, den)
+       end
+       new!(ajd, of, sg)
+     end
+   end
+   class Encoding
+     class Character
+       class UTF8 < String
+         module Methods
+         end
+         ## when loading Encoding::Character::UTF8 instances simply return
+         # an encoded String
+         def self._load data
+           str = Marshal.load(data)
+           str.force_encoding 'UTF-8'
+           str
+         end
+       end
+     end
+   end
+ else
+   class Date
+     def marshal_load a
+       @ajd, @of, @sg, = a
+       @__ca__ = {}
+     end
+   end
+   class Rational
+     def marshal_load a
+       @numerator, @denominator, = a
+     end
+   end
+ end
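
Both shims above plug into Marshal's class-level _load hook. A minimal, self-contained illustration of that protocol (the Token class is hypothetical, not part of ODBA): Marshal.dump writes the payload returned by an instance's _dump method, and Marshal.load hands that payload back to the class's _load, the method that Date._load and Encoding::Character::UTF8._load redefine above for legacy dumps.

    class Token
      attr_reader :value
      def initialize(value)
        @value = value
      end
      def _dump(_depth)
        @value.to_s            # raw payload written into the marshal stream
      end
      def self._load(payload)
        new(payload)           # called by Marshal.load with that payload
      end
    end

    dump = Marshal.dump(Token.new('abc'))
    p Marshal.load(dump).value   # => "abc"
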
@@ -0,0 +1,603 @@
+ #!/usr/bin/env ruby
+ #-- Cache -- odba -- 29.04.2004 -- hwyss@ywesee.com rwaltert@ywesee.com mwalder@ywesee.com
+
+ require 'singleton'
+ require 'date'
+ begin
+   require 'rubygems'
+   gem 'fastthread', '>=0.6.3'
+ rescue LoadError
+ end
+ require 'thread'
+ require 'drb'
+
+ module ODBA
+   class Cache
+     include Singleton
+     include DRb::DRbUndumped
+     CLEANER_PRIORITY = 0 # :nodoc:
+     CLEANING_INTERVAL = 5 # :nodoc:
+     attr_accessor :cleaner_step, :destroy_age, :retire_age, :debug
+     def initialize # :nodoc:
+       if(self::class::CLEANING_INTERVAL > 0)
+         start_cleaner
+       end
+       @retire_age = 300
+       @cache_mutex = Mutex.new
+       @deferred_indices = []
+       @fetched = Hash.new
+       @prefetched = Hash.new
+       @clean_prefetched = false
+       @cleaner_offset = 0
+       @prefetched_offset = 0
+       @cleaner_step = 500
+       @loading_stats = {}
+       @peers = []
+     end
+     # Returns all objects designated by _bulk_fetch_ids_ and registers
+     # _odba_caller_ for each of them. Objects which are not yet loaded are loaded
+     # from ODBA#storage.
+     def bulk_fetch(bulk_fetch_ids, odba_caller)
+       instances = []
+       loaded_ids = []
+       bulk_fetch_ids.each { |id|
+         if(entry = fetch_cache_entry(id))
+           entry.odba_add_reference(odba_caller)
+           instances.push(entry.odba_object)
+           loaded_ids.push(id)
+         end
+       }
+       bulk_fetch_ids -= loaded_ids
+       unless(bulk_fetch_ids.empty?)
+         rows = ODBA.storage.bulk_restore(bulk_fetch_ids)
+         instances += bulk_restore(rows, odba_caller)
+       end
+       instances
+     end
+     def bulk_restore(rows, odba_caller = nil) # :nodoc:
+       retrieved_objects = []
+       rows.each { |row|
+         obj_id = row.at(0)
+         dump = row.at(1)
+         odba_obj = fetch_or_restore(obj_id, dump, odba_caller)
+         retrieved_objects.push(odba_obj)
+       }
+       retrieved_objects
+     end
+     def clean # :nodoc:
+       now = Time.now
+       start = Time.now if(@debug)
+       @cleaned = 0
+       if(@debug)
+         puts "starting cleaning cycle"
+         $stdout.flush
+       end
+       retire_horizon = now - @retire_age
+       @cleaner_offset = _clean(retire_horizon, @fetched, @cleaner_offset)
+       if(@clean_prefetched)
+         @prefetched_offset = _clean(retire_horizon, @prefetched,
+                                     @prefetched_offset)
+       end
+       if(@debug)
+         puts "cleaned: #{@cleaned} objects in #{Time.now - start} seconds"
+         puts "remaining objects in @fetched: #{@fetched.size}"
+         puts "remaining objects in @prefetched: #{@prefetched.size}"
+         mbytes = File.read("/proc/#{$$}/stat").split(' ').at(22).to_i / (2**20)
+         GC.start
+         puts "remaining objects in ObjectSpace: #{ObjectSpace.each_object {}}"
+         puts "memory-usage: #{mbytes}MB"
+         $stdout.flush
+       end
+     end
+     def _clean(retire_time, holder, offset) # :nodoc:
+       if(offset > holder.size)
+         offset = 0
+       end
+       counter = 0
+       cutoff = offset + @cleaner_step
+       holder.each_value { |value|
+         counter += 1
+         if(counter > offset && value.odba_old?(retire_time))
+           value.odba_retire && (@cleaned += 1)
+         end
+         return cutoff if(counter > cutoff)
+       }
+       cutoff
+     # every once in a while we'll get a 'hash modified during iteration'-Error.
+     # not to worry, we'll just try again later.
+     rescue StandardError
+       offset
+     end
+     # overrides the ODBA_PREFETCH constant and @odba_prefetch instance variable
+     # in Persistable. Use this if a secondary client is more memory-bound than
+     # performance-bound.
+     def clean_prefetched(flag=true)
+       if(@clean_prefetched = flag)
+         clean
+       end
+     end
+     def clear # :nodoc:
+       @fetched.clear
+       @prefetched.clear
+     end
+     # Creates or recreates automatically defined indices
+     def create_deferred_indices(drop_existing = false)
+       @deferred_indices.each { |definition|
+         name = definition.index_name
+         if(drop_existing && self.indices.include?(name))
+           drop_index(name)
+         end
+         unless(self.indices.include?(name))
+           index = create_index(definition)
+           if(index.target_klass.respond_to?(:odba_extent))
+             index.fill(index.target_klass.odba_extent)
+           end
+         end
+       }
+     end
+     # Creates a new index according to IndexDefinition
+     def create_index(index_definition, origin_module=Object)
+       transaction {
+         klass = if(index_definition.fulltext)
+           FulltextIndex
+         elsif(index_definition.resolve_search_term.is_a?(Hash))
+           ConditionIndex
+         else
+           Index
+         end
+         index = klass.new(index_definition, origin_module)
+         indices.store(index_definition.index_name, index)
+         indices.odba_store_unsaved
+         index
+       }
+     end
+     # Permanently deletes _object_ from the database and disconnects all connected
+     # Persistables
+     def delete(odba_object)
+       odba_id = odba_object.odba_id
+       name = odba_object.odba_name
+       odba_object.odba_notify_observers(:delete, odba_id, odba_object.object_id)
+       rows = ODBA.storage.retrieve_connected_objects(odba_id)
+       rows.each { |row|
+         id = row.first
+         # Self-Referencing objects don't have to be resaved
+         begin
+           if(connected_object = fetch(id, nil))
+             connected_object.odba_cut_connection(odba_object)
+             connected_object.odba_isolated_store
+           end
+         rescue OdbaError
+           warn "OdbaError ### deleting #{odba_object.class}:#{odba_id}"
+           warn " ### while looking for connected object #{id}"
+         end
+       }
+       delete_cache_entry(odba_id)
+       delete_cache_entry(name)
+       ODBA.storage.delete_persistable(odba_id)
+       delete_index_element(odba_object)
+       odba_object
+     end
+     def delete_cache_entry(key)
+       @cache_mutex.synchronize {
+         @fetched.delete(key)
+         @prefetched.delete(key)
+       }
+     end
+     def delete_index_element(odba_object) # :nodoc:
+       klass = odba_object.class
+       indices.each_value { |index|
+         index.delete(odba_object)
+       }
+     end
+     # Permanently deletes the index named _index_name_
+     def drop_index(index_name)
+       transaction {
+         ODBA.storage.drop_index(index_name)
+         self.delete(self.indices[index_name])
+       }
+     end
+     def drop_indices # :nodoc:
+       keys = self.indices.keys
+       keys.each { |key|
+         drop_index(key)
+       }
+     end
+     # Queue an index for creation by #setup
+     def ensure_index_deferred(index_definition)
+       @deferred_indices.push(index_definition)
+     end
+     # Get all instances of a class (- a limited extent)
+     def extent(klass, odba_caller=nil)
+       bulk_fetch(ODBA.storage.extent_ids(klass), odba_caller)
+     end
+     # Get number of instances of a class
+     def count(klass)
+       ODBA.storage.extent_count(klass)
+     end
+     # Fetch a Persistable identified by _odba_id_. Registers _odba_caller_ with
+     # the CacheEntry. Loads the Persistable if it is not already loaded.
+     def fetch(odba_id, odba_caller=nil)
+       fetch_or_do(odba_id, odba_caller) {
+         load_object(odba_id, odba_caller)
+       }
+     end
+     def fetch_cache_entry(odba_id_or_name) # :nodoc:
+       @prefetched[odba_id_or_name] || @fetched[odba_id_or_name]
+     end
+     @@receiver_name = RUBY_VERSION >= '1.9' ? :@receiver : '@receiver'
+     def fetch_collection(odba_obj) # :nodoc:
+       collection = []
+       bulk_fetch_ids = []
+       rows = ODBA.storage.restore_collection(odba_obj.odba_id)
+       return collection if rows.empty?
+       rows.each { |row|
+         key = ODBA.marshaller.load(row[0])
+         value = ODBA.marshaller.load(row[1])
+         item = nil
+         if([key, value].any? { |item| item.instance_variable_get(@@receiver_name) })
+           odba_id = odba_obj.odba_id
+           warn "stub for #{item.class}:#{item.odba_id} was saved with receiver in collection of #{odba_obj.class}:#{odba_id}"
+           warn "repair: remove [#{odba_id}, #{row[0]}, #{row[1].length}]"
+           ODBA.storage.collection_remove(odba_id, row[0])
+           key = key.odba_isolated_stub
+           key_dump = ODBA.marshaller.dump(key)
+           value = value.odba_isolated_stub
+           value_dump = ODBA.marshaller.dump(value)
+           warn "repair: insert [#{odba_id}, #{key_dump}, #{value_dump.length}]"
+           ODBA.storage.collection_store(odba_id, key_dump, value_dump)
+         end
+         bulk_fetch_ids.push(key.odba_id)
+         bulk_fetch_ids.push(value.odba_id)
+         collection.push([key, value])
+       }
+       bulk_fetch_ids.compact!
+       bulk_fetch_ids.uniq!
+       bulk_fetch(bulk_fetch_ids, odba_obj)
+       collection.each { |pair|
+         pair.collect! { |item|
+           if(item.is_a?(ODBA::Stub))
+             ## don't fetch: that may result in a conflict when storing.
+             #fetch(item.odba_id, odba_obj)
+             item.odba_container = odba_obj
+             item
+           elsif(ce = fetch_cache_entry(item.odba_id))
+             warn "collection loaded unstubbed object: #{item.odba_id}"
+             ce.odba_add_reference(odba_obj)
+             ce.odba_object
+           else
+             item
+           end
+         }
+       }
+       collection
+     end
+     def fetch_collection_element(odba_id, key) # :nodoc:
+       key_dump = ODBA.marshaller.dump(key.odba_isolated_stub)
+       ## for backward-compatibility and robustness we only attempt
+       ## to load if there was a dump stored in the collection table
+       if(dump = ODBA.storage.collection_fetch(odba_id, key_dump))
+         item = ODBA.marshaller.load(dump)
+         if(item.is_a?(ODBA::Stub))
+           fetch(item.odba_id)
+         elsif(item.is_a?(ODBA::Persistable))
+           warn "collection_element was unstubbed object: #{item.odba_id}"
+           fetch_or_restore(item.odba_id, dump, nil)
+         else
+           item
+         end
+       end
+     end
+     def fetch_named(name, odba_caller, &block) # :nodoc:
+       fetch_or_do(name, odba_caller) {
+         dump = ODBA.storage.restore_named(name)
+         if(dump.nil?)
+           odba_obj = block.call
+           odba_obj.odba_name = name
+           odba_obj.odba_store(name)
+           odba_obj
+         else
+           fetch_or_restore(name, dump, odba_caller)
+         end
+       }
+     end
+     def fetch_or_do(obj_id, odba_caller, &block) # :nodoc:
+       if (cache_entry = fetch_cache_entry(obj_id)) && cache_entry._odba_object
+         cache_entry.odba_add_reference(odba_caller)
+         cache_entry.odba_object
+       else
+         block.call
+       end
+     end
+     def fetch_or_restore(odba_id, dump, odba_caller) # :nodoc:
+       fetch_or_do(odba_id, odba_caller) {
+         odba_obj, collection = restore(dump)
+         @cache_mutex.synchronize {
+           fetch_or_do(odba_id, odba_caller) {
+             cache_entry = CacheEntry.new(odba_obj)
+             cache_entry.odba_add_reference(odba_caller)
+             hash = odba_obj.odba_prefetch? ? @prefetched : @fetched
+             name = odba_obj.odba_name
+             hash.store(odba_obj.odba_id, cache_entry)
+             if name
+               hash.store(name, cache_entry)
+             end
+             odba_obj
+           }
+         }
+       }
+     end
+     def fill_index(index_name, targets)
+       self.indices[index_name].fill(targets)
+     end
+     # Checks whether the object identified by _odba_id_ has been loaded.
+     def include?(odba_id)
+       @fetched.include?(odba_id) || @prefetched.include?(odba_id)
+     end
+     def index_keys(index_name, length=nil)
+       index = indices.fetch(index_name)
+       index.keys(length)
+     end
+     def index_matches(index_name, substring, limit=nil, offset=0)
+       index = indices.fetch(index_name)
+       index.matches substring, limit, offset
+     end
+     # Returns a Hash-table containing all stored indices.
+     def indices
+       @indices ||= fetch_named('__cache_server_indices__', self) {
+         {}
+       }
+     end
+     def invalidate(odba_id)
+       ## when finalizers are run, no other threads will be scheduled,
+       # therefore we don't need to @cache_mutex.synchronize
+       @fetched.delete odba_id
+       @prefetched.delete odba_id
+     end
+     def invalidate!(*odba_ids)
+       odba_ids.each do |odba_id|
+         if entry = fetch_cache_entry(odba_id)
+           entry.odba_retire :force => true
+         end
+         invalidate odba_id
+       end
+     end
+     # Returns the next valid odba_id
+     def next_id
+       id = ODBA.storage.next_id
+       @peers.each do |peer|
+         peer.reserve_next_id id rescue DRb::DRbError
+       end
+       id
+     rescue OdbaDuplicateIdError
+       retry
+     end
+     # Use this to load all prefetchable Persistables from the db at once
+     def prefetch
+       bulk_restore(ODBA.storage.restore_prefetchable)
+     end
+     # prints loading statistics to $stdout
+     def print_stats
+       fmh = " %-20s | %10s | %5s | %6s | %6s | %6s | %-20s\n"
+       fmt = " %-20s | %10.3f | %5i | %6.3f | %6.3f | %6.3f | %s\n"
+       head = sprintf(fmh,
+                      "class", "total", "count", "min", "max", "avg", "callers")
+       line = "-" * head.length
+       puts line
+       print head
+       puts line
+       @loading_stats.sort_by { |key, val|
+         val[:total_time]
+       }.reverse.each { |key, val|
+         key = key.to_s
+         if(key.length > 20)
+           key = key[-20,20]
+         end
+         avg = val[:total_time] / val[:count]
+         printf(fmt, key, val[:total_time], val[:count],
+                val[:times].min, val[:times].max, avg,
+                val[:callers].join(','))
+       }
+       puts line
+       $stdout.flush
+     end
+     # Register a peer that has access to the same DB backend
+     def register_peer peer
+       @peers.push(peer).uniq!
+     end
+     # Reserve an id with all registered peers
+     def reserve_next_id id
+       ODBA.storage.reserve_next_id id
+     end
+     # Clears the loading statistics
+     def reset_stats
+       @loading_stats.clear
+     end
+     # Find objects in an index
+     def retrieve_from_index(index_name, search_term, meta=nil)
+       index = indices.fetch(index_name)
+       ids = index.fetch_ids(search_term, meta)
+       if meta.respond_to?(:error_limit) && (limit = meta.error_limit) \
+         && (size = ids.size) > limit.to_i
+         error = OdbaResultLimitError.new
+         error.limit = limit
+         error.size = size
+         error.index = index_name
+         error.search_term = search_term
+         error.meta = meta
+         raise error
+       end
+       bulk_fetch(ids, nil)
+     end
+     # Create necessary DB-Structure / other storage-setup
+     def setup
+       ODBA.storage.setup
+       self.indices.each_key { |index_name|
+         ODBA.storage.ensure_target_id_index(index_name)
+       }
+       create_deferred_indices
+       nil
+     end
+     # Returns the total number of cached objects
+     def size
+       @prefetched.size + @fetched.size
+     end
+     def start_cleaner # :nodoc:
+       @cleaner = Thread.new {
+         Thread.current.priority = self::class::CLEANER_PRIORITY
+         loop {
+           sleep(self::class::CLEANING_INTERVAL)
+           begin
+             clean
+           rescue StandardError => e
+             puts e
+             puts e.backtrace
+           end
+         }
+       }
+     end
+     # Store a Persistable _object_ in the database
+     def store(object)
+       odba_id = object.odba_id
+       name = object.odba_name
+       object.odba_notify_observers(:store, odba_id, object.object_id)
+       if(ids = Thread.current[:txids])
+         ids.unshift([odba_id,name])
+       end
+       ## get target_ids before anything else
+       target_ids = object.odba_target_ids
+       changes = store_collection_elements(object)
+       prefetchable = object.odba_prefetch?
+       dump = object.odba_isolated_dump
+       ODBA.storage.store(odba_id, dump, name, prefetchable, object.class)
+       store_object_connections(odba_id, target_ids)
+       update_references(target_ids, object)
+       object = store_cache_entry(odba_id, object, name)
+       update_indices(object)
+       @peers.each do |peer|
+         peer.invalidate! odba_id rescue DRb::DRbError
+       end
+       object
+     end
+     def store_cache_entry(odba_id, object, name=nil) # :nodoc:
+       @cache_mutex.synchronize {
+         if cache_entry = fetch_cache_entry(odba_id)
+           cache_entry.update object
+         else
+           hash = object.odba_prefetch? ? @prefetched : @fetched
+           cache_entry = CacheEntry.new(object)
+           hash.store(odba_id, cache_entry)
+           unless(name.nil?)
+             hash.store(name, cache_entry)
+           end
+         end
+         cache_entry.odba_object
+       }
+     end
+     def store_collection_elements(odba_obj) # :nodoc:
+       odba_id = odba_obj.odba_id
+       collection = odba_obj.odba_collection.collect { |key, value|
+         [ ODBA.marshaller.dump(key.odba_isolated_stub),
+           ODBA.marshaller.dump(value.odba_isolated_stub) ]
+       }
+       old_collection = ODBA.storage.restore_collection(odba_id).collect { |row|
+         [row[0], row[1]]
+       }
+       changes = (old_collection - collection).each { |key_dump, _|
+         ODBA.storage.collection_remove(odba_id, key_dump)
+       }.size
+       changes + (collection - old_collection).each { |key_dump, value_dump|
+         ODBA.storage.collection_store(odba_id, key_dump, value_dump)
+       }.size
+     end
+     def store_object_connections(odba_id, target_ids) # :nodoc:
+       ODBA.storage.ensure_object_connections(odba_id, target_ids)
+     end
+     # Executes the block in a transaction. If the transaction fails, all
+     # affected Persistable objects are reloaded from the db (which by then has
+     # also performed a rollback). Rollback is quite inefficient at this time.
+     def transaction(&block)
+       Thread.current[:txids] = []
+       ODBA.storage.transaction(&block)
+     rescue Exception => excp
+       transaction_rollback
+       raise excp
+     ensure
+       Thread.current[:txids] = nil
+     end
+     def transaction_rollback # :nodoc:
+       if(ids = Thread.current[:txids])
+         ids.each { |id, name|
+           if(entry = fetch_cache_entry(id))
+             if(dump = ODBA.storage.restore(id))
+               odba_obj, collection = restore(dump)
+               entry.odba_replace!(odba_obj)
+             else
+               entry.odba_cut_connections!
+               delete_cache_entry(id)
+               delete_cache_entry(name)
+             end
+           end
+         }
+       end
+     end
+     # Unregister a peer
+     def unregister_peer peer
+       @peers.delete peer
+     end
+     def update_indices(odba_object) # :nodoc:
+       if(odba_object.odba_indexable?)
+         indices.each { |index_name, index|
+           index.update(odba_object)
+         }
+       end
+     end
+     def update_references(target_ids, object) # :nodoc:
+       target_ids.each { |odba_id|
+         if(entry = fetch_cache_entry(odba_id))
+           entry.odba_add_reference(object)
+         end
+       }
+     end
+     private
+     def load_object(odba_id, odba_caller)
+       start = Time.now if(@debug)
+       dump = ODBA.storage.restore(odba_id)
+       odba_obj = restore_object(odba_id, dump, odba_caller)
+       return odba_obj unless(@debug)
+       stats = (@loading_stats[odba_obj.class] ||= {
+         :count => 0, :times => [], :total_time => 0, :callers => [],
+       })
+       stats[:count] += 1
+       time = Time.now - start
+       stats[:times].push(time)
+       stats[:total_time] += time
+       stats[:callers].push(odba_caller.class).uniq!
+       if(time > 2)
+         names = []
+         odba_caller.instance_variables.each { |name|
+           if(odba_caller.instance_variable_get(name).odba_id == odba_id)
+             names.push(name)
+           end
+         }
+         printf("long load-time (%4.2fs) for odba_id %i: %s#%s\n",
+                time, odba_id, odba_caller, names.join(','))
+       end
+       odba_obj
+     end
+     def restore(dump)
+       odba_obj = ODBA.marshaller.load(dump)
+       unless(odba_obj.is_a?(Persistable))
+         odba_obj.extend(Persistable)
+       end
+       collection = fetch_collection(odba_obj)
+       odba_obj.odba_restore(collection)
+       [odba_obj, collection]
+     end
+     def restore_object(odba_id, dump, odba_caller)
+       if(dump.nil?)
+         raise OdbaError, "Unknown odba_id #{odba_id}"
+       end
+       fetch_or_restore(odba_id, dump, odba_caller)
+     end
+   end
+ end
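
A hedged usage sketch tying the public Cache API above together (illustrative, not shipped with the gem). It assumes a configured storage backend and reuses the Counter class from the example at the top of the first file; error handling is omitted.

    cache = ODBA::Cache.instance

    # group related stores in one transaction; on failure #transaction
    # reloads the affected Persistables while the database rolls back
    cache.transaction do
      counter = cache.fetch_named('page_hits', nil) { Counter.new }
      counter.up
    end

    # class extents and counts (see #extent and #count above)
    p cache.count(Counter)
    all_counters = cache.extent(Counter)
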