odba 1.1.2 → 1.1.7

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
- SHA1:
- metadata.gz: d60345e57c1bfc54f935b1349f9e94da3a0a4135
- data.tar.gz: 8c7ffb139d6f7b93e99aa500357267a2c2429b26
+ SHA256:
+ metadata.gz: 85dc9b8b21d66f01d3d7d37d9a73ce0f62ad5a5e0669a824d541cdbccbcd342c
+ data.tar.gz: dac941a19b3261e7fae22d8e8ce24f7da06dcfab842038ed2807fbd1e85f97ef
  SHA512:
- metadata.gz: 71678fe42e0915eada7c995c611d01282b62564f494d9fd91bcf71b538a054a8a6f7014fffb3223447836bccfa55652994676314f216124b1b389ee5f3add161
- data.tar.gz: 5daaf796ac79b2fc707fcb4087535e8455533cd2f7fe566d6e9deeaf74d226c47d3c087349590f2cd36bc09982dad6b159d7f8ce58854239964b1b0254de5de1
+ metadata.gz: 91d1b5bbf91bc9fa9f2907e286a0c9a3598fc48fcc00892e7cea690cefde44d47bf0064a696a2a58de96ec155392605d079f62d7b3beaeaa2c86c72f36fc3880
+ data.tar.gz: 6e01aed62f94f16575b6b466454bf7ce4b2fa828e2db43adaf4e28c032c3f1f7c72b35927c14dcf309c49bff95216cebaf67eec2a73c474ddca85dc1580be247
@@ -0,0 +1,35 @@
+ # This workflow uses actions that are not certified by GitHub.
+ # They are provided by a third-party and are governed by
+ # separate terms of service, privacy policy, and support
+ # documentation.
+ # This workflow will download a prebuilt Ruby version, install dependencies and run tests with Rake
+ # For more information see: https://github.com/marketplace/actions/setup-ruby-jruby-and-truffleruby
+
+ name: Ruby
+
+ on:
+ push:
+ branches: [ master ]
+ pull_request:
+ branches: [ master ]
+
+ jobs:
+ test:
+ runs-on: ubuntu-latest
+ # Using a matrix fails, because ruby/setup-ruby always invokes bundle install without any additional args
+ # Fixed by not defining the debugger group in the Gemfile
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ ubuntu]
+ ruby: [2.7, 3.0, head]
+ continue-on-error: ${{ endsWith(matrix.ruby, 'head') }}
+ steps:
+ - uses: actions/checkout@v2
+ - uses: ruby/setup-ruby@v1
+ with:
+ ruby-version: ${{ matrix.ruby }}
+ bundler-cache: true
+
+ - name: Run tests via rake test
+ run: bundle exec rake test
data/Gemfile CHANGED
@@ -2,16 +2,7 @@ source "https://rubygems.org"
 
  gemspec
 
- group 'test' do
- gem 'flexmock', "~> 1.3.0"
- gem 'test-unit'
- gem 'minitest'
- end
-
- group 'debugger' do
- gem 'pry'
- gem 'pry-nav'
- gem 'pry-rescue'
- gem 'pry-stack_explorer'
- gem 'pry-doc'
- end
+ # The group debugger must be disabled when using a matrix build via github/actions
+ group :debugger do
+ gem 'pry-byebug'
+ end if false
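The trailing `if false` is what actually disables the group: a Ruby statement modifier guards the whole preceding expression, here the entire `group ... end` call, so the block never runs and Bundler never records a debugger group (which keeps the unconditional `bundle install` in CI from trying to install pry-byebug). A minimal stand-alone sketch of the same pattern; the `group` method below is a stand-in for Bundler's DSL, not the real implementation:

    # Stand-in for Bundler's group DSL, for illustration only.
    def group(name)
      puts "defining group #{name}"
      yield
    end

    group :debugger do
      puts "this would add pry-byebug"
    end if false   # the modifier guards the whole call: nothing is printed

    group :test do
      puts "this group is defined as usual"
    end            # prints both lines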
@@ -1,3 +1,29 @@
+ === 1.1.7 / 20.01.2021
+
+ * Reworked some tests
+ * Updated to use Ruby 3.0.0
+ * Added github actions
+ * Updated to use ydbi 0.5.7
+
+ === 1.1.6 / 23.01.2016
+
+ * Updated to use ydbi 0.5.6
+
+ === 1.1.5 / 23.01.2016
+
+ * Remove unused parameter dict for update_fulltext_index
+
+ === 1.1.4 / 13.12.2017
+
+ * Drop text search dictionaries/configuration before recreating
+ * Remove dictionary argument in fulltext search and index_definition
+
+ === 1.1.3 / 12.12.2016
+
+ * Avoid errors by always specifying "IF NOT EXISTS" when creating tables and indices
+ * Add utility method get_server_version
+ * Removed misleading check in generate_dictionary
+
  === 1.1.2 / 10.05.2016
 
  * requires now 'ydbi' and 'ydbd-pg'
@@ -52,6 +78,6 @@
 
  === 1.0.0 / 20.12.2010
 
- * Add ODBA.cache.index_matches(index_name, substring)
+ * Add ODBA.cache.index_matches(index_name, substring)
 
  * this new method returns all search-terms of a given index (identified by index_name) that start with substring.
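Since the 1.0.0 entry above only describes ODBA.cache.index_matches in prose, a short usage sketch may help; the index name and the returned terms are invented for illustration, and the call assumes an already configured cache with a filled index:

    # Illustration only: index name and data are hypothetical.
    require 'odba'

    ODBA.cache.index_matches('substance_name_index', 'acet')
    # => e.g. ["acetaminophen", "acetylcysteine"]
    #    i.e. every search term stored in that index that starts with "acet"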
data/Rakefile CHANGED
@@ -16,14 +16,13 @@ end
  desc "Run tests"
  task :default => :test
 
- desc 'Run odba with all commonly used combinations'
- task :test do
- log_file = 'suite.log'
- res = system("bash -c 'set -o pipefail && bundle exec ruby test/suite.rb 2>&1 | tee #{log_file}'")
- puts "Running test/suite.rb returned #{res.inspect}. Output was redirected to #{log_file}"
- exit 1 unless res
+ Rake::TestTask.new(:test) do |t|
+ t.libs << "test"
+ t.libs << "lib"
+ t.test_files = FileList["test/**/test_*.rb"]
  end
 
+
  require 'rake/clean'
  CLEAN.include FileList['pkg/*.gem']
 
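The new task delegates test discovery to Rake::TestTask instead of shelling out to test/suite.rb. Roughly, and only as a sketch of the effect rather than the exact command Rake::TestTask builds, `bundle exec rake test` now amounts to:

    # Sketch: load every test file with test/ and lib/ on the load path;
    # Minitest/Test::Unit then run the loaded cases at process exit.
    $LOAD_PATH.unshift "test", "lib"
    Dir.glob("test/**/test_*.rb").sort.each { |f| require File.expand_path(f) }

A single file can still be selected the usual Rake::TestTask way, e.g. `bundle exec rake test TEST=test/test_cache.rb` (the file name here is hypothetical).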
@@ -17,14 +17,15 @@ module ODBA
  class Cache
  include Singleton
  include DRb::DRbUndumped
- CLEANER_PRIORITY = 0 # :nodoc:
- CLEANING_INTERVAL = 5 # :nodoc:
+ CLEANER_PRIORITY = 0 # :nodoc:
+ CLEANING_INTERVAL = 5 # :nodoc:
  attr_accessor :cleaner_step, :destroy_age, :retire_age, :debug, :file_lock
- def initialize # :nodoc:
+ def initialize # :nodoc:
  if(self::class::CLEANING_INTERVAL > 0)
  start_cleaner
  end
  @retire_age = 300
+ @receiver = nil
  @cache_mutex = Mutex.new
  @deferred_indices = []
  @fetched = Hash.new
@@ -36,9 +37,9 @@ module ODBA
  @loading_stats = {}
  @peers = []
  @file_lock = false
- @debug ||= false
+ @debug ||= false # Setting @debug to true makes two unit tests fail!
  end
- # Returns all objects designated by _bulk_fetch_ids_ and registers
+ # Returns all objects designated by _bulk_fetch_ids_ and registers
  # _odba_caller_ for each of them. Objects which are not yet loaded are loaded
  # from ODBA#storage.
  def bulk_fetch(bulk_fetch_ids, odba_caller)
@@ -79,7 +80,7 @@ module ODBA
  retire_horizon = now - @retire_age
  @cleaner_offset = _clean(retire_horizon, @fetched, @cleaner_offset)
  if(@clean_prefetched)
- @prefetched_offset = _clean(retire_horizon, @prefetched,
+ @prefetched_offset = _clean(retire_horizon, @prefetched,
  @prefetched_offset)
  end
  if(@debug)
@@ -93,8 +94,8 @@
  $stdout.flush
  end
  end
- def _clean(retire_time, holder, offset) # :nodoc:
- if(offset > holder.size)
+ def _clean(retire_time, holder, offset) # :nodoc:
+ if(offset > holder.size)
  offset = 0
  end
  counter = 0
@@ -108,14 +109,14 @@
  return cutoff if(counter > cutoff)
  }
  }
- cutoff
+ cutoff
  # every once in a while we'll get a 'hash modified during iteration'-Error.
  # not to worry, we'll just try again later.
  rescue StandardError
  offset
  end
  # overrides the ODBA_PREFETCH constant and @odba_prefetch instance variable
- # in Persistable. Use this if a secondary client is more memory-bound than
+ # in Persistable. Use this if a secondary client is more memory-bound than
  # performance-bound.
  def clean_prefetched(flag=true)
  if(@clean_prefetched = flag)
@@ -133,7 +134,7 @@
  if(drop_existing && self.indices.include?(name))
  drop_index(name)
  end
- unless(self.indices.include?(name))
+ unless(self.indices.include?(name))
  index = create_index(definition)
  if(index.target_klass.respond_to?(:odba_extent))
  index.fill(index.target_klass.odba_extent)
@@ -158,7 +159,7 @@
  }
  end
  # Permanently deletes _object_ from the database and deconnects all connected
- # Persistables
+ # Persistables
  def delete(odba_object)
  odba_id = odba_object.odba_id
  name = odba_object.odba_name
@@ -199,7 +200,7 @@
  def drop_index(index_name)
  transaction {
  ODBA.storage.drop_index(index_name)
- self.delete(self.indices[index_name])
+ self.delete(self.indices[index_name])
  }
  end
  def drop_indices # :nodoc:
@@ -223,7 +224,7 @@ module ODBA
  # Fetch a Persistable identified by _odba_id_. Registers _odba_caller_ with
  # the CacheEntry. Loads the Persistable if it is not already loaded.
  def fetch(odba_id, odba_caller=nil)
- fetch_or_do(odba_id, odba_caller) {
+ fetch_or_do(odba_id, odba_caller) {
  load_object(odba_id, odba_caller)
  }
  end
@@ -233,24 +234,26 @@ module ODBA
  @@receiver_name = RUBY_VERSION >= '1.9' ? :@receiver : '@receiver'
  def fetch_collection(odba_obj) # :nodoc:
  collection = []
- bulk_fetch_ids = []
+ bulk_fetch_ids = []
  rows = ODBA.storage.restore_collection(odba_obj.odba_id)
  return collection if rows.empty?
+ idx = 0
  rows.each { |row|
- key = ODBA.marshaller.load(row[0])
- value = ODBA.marshaller.load(row[1])
+ key = row[0].is_a?(Integer) ? row[0] : ODBA.marshaller.load(row[0])
+ value = row[1].is_a?(Integer) ? row[1] : ODBA.marshaller.load(row[1])
+ idx += 1
  item = nil
  if([key, value].any? { |item| item.instance_variable_get(@@receiver_name) })
  odba_id = odba_obj.odba_id
  warn "stub for #{item.class}:#{item.odba_id} was saved with receiver in collection of #{odba_obj.class}:#{odba_id}"
  warn "repair: remove [#{odba_id}, #{row[0]}, #{row[1].length}]"
- ODBA.storage.collection_remove(odba_id, row[0])
+ ODBA.storage.collection_remove(odba_id, row[0])
  key = key.odba_isolated_stub
  key_dump = ODBA.marshaller.dump(key)
  value = value.odba_isolated_stub
  value_dump = ODBA.marshaller.dump(value)
  warn "repair: insert [#{odba_id}, #{key_dump}, #{value_dump.length}]"
- ODBA.storage.collection_store(odba_id, key_dump, value_dump)
+ ODBA.storage.collection_store(odba_id, key_dump, value_dump)
  end
  bulk_fetch_ids.push(key.odba_id)
  bulk_fetch_ids.push(value.odba_id)
@@ -259,8 +262,8 @@
  bulk_fetch_ids.compact!
  bulk_fetch_ids.uniq!
  bulk_fetch(bulk_fetch_ids, odba_obj)
- collection.each { |pair|
- pair.collect! { |item|
+ collection.each { |pair|
+ pair.collect! { |item|
  if(item.is_a?(ODBA::Stub))
  ## don't fetch: that may result in a conflict when storing.
  #fetch(item.odba_id, odba_obj)
@@ -271,7 +274,7 @@
  ce.odba_add_reference(odba_obj)
  ce.odba_object
  else
- item
+ item
  end
  }
  }
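The substantive change in fetch_collection above is the Integer guard on the key and value columns: a row value is only run through ODBA.marshaller.load when it is not already a plain Integer, presumably because the updated ydbi/ydbd-pg stack can hand back integer ids directly instead of marshalled hex dumps. A reduced sketch of that guard (the method name and sample values are invented):

    # Illustration only: unmarshal a column unless the driver already
    # returned a plain Integer.
    def load_column(value)
      value.is_a?(Integer) ? value : ODBA.marshaller.load(value)
    end

    # load_column(42)          # => 42, used as-is
    # load_column("0403...")   # => the object encoded in the hex dump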
@@ -294,7 +297,7 @@ module ODBA
  end
  end
  def fetch_named(name, odba_caller, &block) # :nodoc:
- fetch_or_do(name, odba_caller) {
+ fetch_or_do(name, odba_caller) {
  dump = ODBA.storage.restore_named(name)
  if(dump.nil?)
  odba_obj = block.call
@@ -303,7 +306,7 @@
  odba_obj
  else
  fetch_or_restore(name, dump, odba_caller)
- end
+ end
  }
  end
  def fetch_or_do(obj_id, odba_caller, &block) # :nodoc:
@@ -332,7 +335,7 @@
  }
  }
  end
- def fill_index(index_name, targets)
+ def fill_index(index_name, targets)
  self.indices[index_name].fill(targets)
  end
  # Checks wether the object identified by _odba_id_ has been loaded.
@@ -347,7 +350,7 @@
  index = indices.fetch(index_name)
  index.matches substring, limit, offset
  end
- # Returns a Hash-table containing all stored indices.
+ # Returns a Hash-table containing all stored indices.
  def indices
  @indices ||= fetch_named('__cache_server_indices__', self) {
  {}
@@ -420,14 +423,14 @@ module ODBA
  def print_stats
  fmh = " %-20s | %10s | %5s | %6s | %6s | %6s | %-20s\n"
  fmt = " %-20s | %10.3f | %5i | %6.3f | %6.3f | %6.3f | %s\n"
- head = sprintf(fmh,
+ head = sprintf(fmh,
  "class", "total", "count", "min", "max", "avg", "callers")
- line = "-" * head.length
+ line = "-" * head.length
  puts line
  print head
  puts line
- @loading_stats.sort_by { |key, val|
- val[:total_time]
+ @loading_stats.sort_by { |key, val|
+ val[:total_time]
  }.reverse.each { |key, val|
  key = key.to_s
  if(key.length > 20)
@@ -482,13 +485,13 @@
  def size
  @prefetched.size + @fetched.size
  end
- def start_cleaner # :nodoc:
+ def start_cleaner # :nodoc:
  @cleaner = Thread.new {
  Thread.current.priority = self::class::CLEANER_PRIORITY
  loop {
  sleep(self::class::CLEANING_INTERVAL)
  begin
- clean
+ clean
  rescue StandardError => e
  puts e
  puts e.backtrace
@@ -547,14 +550,14 @@
  ODBA.storage.collection_remove(odba_id, key_dump)
  }.size
  changes + (collection - old_collection).each { |key_dump, value_dump|
- ODBA.storage.collection_store(odba_id, key_dump, value_dump)
+ ODBA.storage.collection_store(odba_id, key_dump, value_dump)
  }.size
  end
  def store_object_connections(odba_id, target_ids) # :nodoc:
  ODBA.storage.ensure_object_connections(odba_id, target_ids)
  end
- # Executes the block in a transaction. If the transaction fails, all
- # affected Persistable objects are reloaded from the db (which by then has
+ # Executes the block in a transaction. If the transaction fails, all
+ # affected Persistable objects are reloaded from the db (which by then has
  # also performed a rollback). Rollback is quite inefficient at this time.
  def transaction(&block)
  Thread.current[:txids] = []
@@ -52,7 +52,7 @@ module ODBA
  object
  end
  def odba_cut_connections!
- @cache_entry_mutex.synchronize do
+ @cache_entry_mutex.synchronize do
  @accessed_by.each { |object_id, odba_id|
  if((item = odba_id2ref(odba_id) || object_id2ref(object_id, odba_id)) \
  && item.respond_to?(:odba_cut_connection))
@@ -77,10 +77,10 @@
  && (retire_horizon > @last_access)
  end
  def odba_retire opts={}
- # replace with stubs in accessed_by
+ # replace with stubs in accessed_by
  instance = _odba_object
  if opts[:force]
- @cache_entry_mutex.synchronize do
+ @cache_entry_mutex.synchronize do
  @accessed_by.each do |object_id, odba_id|
  if item = odba_id2ref(odba_id)
  item.odba_stubize instance, opts
@@ -23,7 +23,7 @@
  ]
  end
  attr_accessor :origin_klass, :target_klass, :resolve_origin, :resolve_target,
- :resolve_search_term, :index_name, :dictionary, :class_filter
+ :resolve_search_term, :index_name, :class_filter
  def initialize(index_definition, origin_module)
  @origin_klass = origin_module.instance_eval(index_definition.origin_klass.to_s)
  @target_klass = origin_module.instance_eval(index_definition.target_klass.to_s)
@@ -31,7 +31,6 @@
  @resolve_target = index_definition.resolve_target
  @index_name = index_definition.index_name
  @resolve_search_term = index_definition.resolve_search_term
- @dictionary = index_definition.dictionary
  @class_filter = index_definition.class_filter
  end
  def current_origin_ids(target_id) # :nodoc:
@@ -381,15 +380,12 @@ module ODBA
  end
  def fetch_ids(search_term, meta=nil) # :nodoc:
  limit = meta.respond_to?(:limit) && meta.limit
- rows = ODBA.storage.retrieve_from_fulltext_index(@index_name,
- search_term, @dictionary, limit)
+ rows = ODBA.storage.retrieve_from_fulltext_index(@index_name, search_term, limit)
  set_relevance(meta, rows)
  rows.collect { |row| row.at(0) }
  end
  def do_update_index(origin_id, search_text, target_id=nil) # :nodoc:
- ODBA.storage.update_fulltext_index(@index_name, origin_id,
- search_text, target_id,
- @dictionary)
+ ODBA.storage.update_fulltext_index(@index_name, origin_id, search_text, target_id)
  end
  end
  end
@@ -5,8 +5,8 @@ module ODBA
  # IndexDefinition is a convenience class. Load a yaml-dump of this and pass it
  # to Cache#create_index to introduce new indices
  class IndexDefinition
- attr_accessor :index_name, :dictionary, :origin_klass,
- :target_klass, :resolve_search_term, :resolve_target,
+ attr_accessor :index_name, :origin_klass,
+ :target_klass, :resolve_search_term, :resolve_target,
  :resolve_origin, :fulltext, :init_source, :class_filter
  def initialize
  @index_name = ""
@@ -15,7 +15,6 @@ module ODBA
  @resolve_search_term = ""
  @resolve_target = ""
  @resolve_origin = ""
- @dictionary = ""
  @init_source = ""
  @fulltext = false
  @class_filter = :is_a?
@@ -2,8 +2,8 @@
  #-- Marshal -- odba -- 29.04.2004 -- hwyss@ywesee.com rwaltert@ywesee.com mwalder@ywesee.com
 
  module ODBA
- # Marshal is a simple extension of ::Marshal. To be able to store our data
- # using the DBI-Interface, we need to escape invalid characters from the
+ # Marshal is a simple extension of ::Marshal. To be able to store our data
+ # using the DBI-Interface, we need to escape invalid characters from the
  # standard binary dump.
  module Marshal
  def Marshal.dump(obj)
@@ -13,6 +13,9 @@ module ODBA
  def Marshal.load(hexdump)
  binary = [hexdump].pack('H*')
  ::Marshal.load(binary)
+ rescue => error
+ $stderr.puts "#{error}: hexdump is #{hexdump.inspect} #{error.backtrace.join("\n")}"
+ Date.new
  end
  end
  end
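For context on the rescue added above: ODBA::Marshal stores objects as hex strings, so load first packs the hex dump back into binary before handing it to ::Marshal, and the new rescue logs a failed load and falls back to Date.new instead of raising. A minimal sketch of the round trip (the helper names are invented; only the pack/unpack pattern mirrors the code above):

    require 'date'

    # Hex round trip as used by ODBA::Marshal, for illustration.
    def hex_dump(obj)
      ::Marshal.dump(obj).unpack1('H*')    # binary dump -> hex string
    end

    def hex_load(hexdump)
      ::Marshal.load([hexdump].pack('H*')) # hex string -> binary -> object
    rescue => error
      warn "#{error}: hexdump is #{hexdump.inspect}"
      Date.new                             # same fallback as the patched Marshal.load
    end

    # hex_load(hex_dump([1, "two"]))  # => [1, "two"]
    # hex_load("not a hex dump")      # ::Marshal.load raises, so Date.new is returned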