odba 1.1.2 → 1.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: d60345e57c1bfc54f935b1349f9e94da3a0a4135
- data.tar.gz: 8c7ffb139d6f7b93e99aa500357267a2c2429b26
+ metadata.gz: 2a275f0c5476fd282d1f7c67cce22d64357671f8
+ data.tar.gz: 7832d2977f9d568801af283a522ea5833ca8bf99
  SHA512:
- metadata.gz: 71678fe42e0915eada7c995c611d01282b62564f494d9fd91bcf71b538a054a8a6f7014fffb3223447836bccfa55652994676314f216124b1b389ee5f3add161
- data.tar.gz: 5daaf796ac79b2fc707fcb4087535e8455533cd2f7fe566d6e9deeaf74d226c47d3c087349590f2cd36bc09982dad6b159d7f8ce58854239964b1b0254de5de1
+ metadata.gz: 898f28dbabb3d7542ebeb6b7bbc3cf49934d036334da1f6042c82446189c8248a108903d0b285001b37a43ac0661c7e83d588680367378e7cc65dc3208eddefd
+ data.tar.gz: 639fa00ebbb43378e0a2ce97205d0fc367d703a85872c321ecc802061b4a63e9e6e384665d53c495209afc263f7de8fd7f63e12846d8e3f251f35377d57269ff
@@ -1,3 +1,9 @@
+ === 1.1.3 / 12.12.2016
+
+ * Avoid errors by always specifying "IF NOT EXISTS" when creating tables and indices
+ * Add utility method get_server_version
+ * Removed misleading check in generate_dictionary
+
  === 1.1.2 / 10.05.2016

  * requires now 'ydbi' and 'ydbd-pg'
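The headline change in 1.1.3 is that every CREATE TABLE and CREATE INDEX statement now carries IF NOT EXISTS, so re-running the schema setup against a database that already contains the ODBA tables no longer fails. A minimal sketch of the effect, assuming a DBI-style handle named `dbi` like the one the storage code uses via `self.dbi.do`:

```ruby
# Sketch only: the second pass is a silent no-op instead of failing with
# a PostgreSQL error such as: relation "object" already exists
2.times do
  dbi.do <<-SQL
    CREATE TABLE IF NOT EXISTS object (
      odba_id INTEGER NOT NULL, content TEXT,
      name TEXT, prefetchable BOOLEAN, extent TEXT,
      PRIMARY KEY(odba_id), UNIQUE(name)
    );
  SQL
  dbi.do <<-SQL
    CREATE INDEX IF NOT EXISTS prefetchable_index ON object(prefetchable);
  SQL
end
```

Note that CREATE INDEX ... IF NOT EXISTS is only understood by PostgreSQL 9.5 and later (CREATE TABLE IF NOT EXISTS by 9.1 and later), which is presumably also why this release adds the get_server_version helper.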
@@ -15,31 +15,31 @@ module ODBA
  TABLES = [
  # in table 'object', the isolated dumps of all objects are stored
  ['object', <<-'SQL'],
- CREATE TABLE object (
+ CREATE TABLE IF NOT EXISTS object (
  odba_id INTEGER NOT NULL, content TEXT,
  name TEXT, prefetchable BOOLEAN, extent TEXT,
  PRIMARY KEY(odba_id), UNIQUE(name)
  );
  SQL
  ['prefetchable_index', <<-SQL],
- CREATE INDEX prefetchable_index ON object(prefetchable);
+ CREATE INDEX IF NOT EXISTS prefetchable_index ON object(prefetchable);
  SQL
  ['extent_index', <<-SQL],
- CREATE INDEX extent_index ON object(extent);
+ CREATE INDEX IF NOT EXISTS extent_index ON object(extent);
  SQL
  # helper table 'object_connection'
  ['object_connection', <<-'SQL'],
- CREATE TABLE object_connection (
+ CREATE TABLE IF NOT EXISTS object_connection (
  origin_id integer, target_id integer,
  PRIMARY KEY(origin_id, target_id)
  );
  SQL
  ['target_id_index', <<-SQL],
- CREATE INDEX target_id_index ON object_connection(target_id);
+ CREATE INDEX IF NOT EXISTS target_id_index ON object_connection(target_id);
  SQL
  # helper table 'collection'
  ['collection', <<-'SQL'],
- CREATE TABLE collection (
+ CREATE TABLE IF NOT EXISTS collection (
  odba_id integer NOT NULL, key text, value text,
  PRIMARY KEY(odba_id, key)
  );
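For orientation: TABLES pairs a short name with the DDL that creates the corresponding table or index, and since every statement is now idempotent, the whole list can be replayed on every start-up. The helper below is a hypothetical illustration of that pattern, not code from the gem (it assumes the constant is reachable as ODBA::Storage::TABLES and that a DBI-style handle is at hand):

```ruby
# Hypothetical sketch: replay the full DDL list; IF NOT EXISTS makes each
# statement safe to repeat against an already-initialized database.
def ensure_schema(dbi)
  ODBA::Storage::TABLES.each do |_name, ddl|
    dbi.do(ddl)
  end
end
```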
@@ -135,7 +135,7 @@ CREATE TABLE collection (
  end
  def create_condition_index(table_name, definition)
  self.dbi.do <<-SQL
- CREATE TABLE #{table_name} (
+ CREATE TABLE IF NOT EXISTS #{table_name} (
  origin_id INTEGER,
  #{definition.collect { |*pair| pair.join(' ') }.join(",\n ") },
  target_id INTEGER
@@ -143,22 +143,22 @@ CREATE TABLE #{table_name} (
  SQL
  #index origin_id
  self.dbi.do <<-SQL
- CREATE INDEX origin_id_#{table_name} ON #{table_name}(origin_id);
+ CREATE INDEX IF NOT EXISTS origin_id_#{table_name} ON #{table_name}(origin_id);
  SQL
  #index search_term
  definition.each { |name, datatype|
  self.dbi.do <<-SQL
- CREATE INDEX #{name}_#{table_name} ON #{table_name}(#{name});
+ CREATE INDEX IF NOT EXISTS #{name}_#{table_name} ON #{table_name}(#{name});
  SQL
  }
  #index target_id
  self.dbi.do <<-SQL
- CREATE INDEX target_id_#{table_name} ON #{table_name}(target_id);
+ CREATE INDEX IF NOT EXISTS target_id_#{table_name} ON #{table_name}(target_id);
  SQL
  end
  def create_fulltext_index(table_name)
  self.dbi.do <<-SQL
- CREATE TABLE #{table_name} (
+ CREATE TABLE IF NOT EXISTS #{table_name} (
  origin_id INTEGER,
  search_term tsvector,
  target_id INTEGER
@@ -166,21 +166,20 @@ CREATE TABLE #{table_name} (
  SQL
  #index origin_id
  self.dbi.do <<-SQL
- CREATE INDEX origin_id_#{table_name} ON #{table_name}(origin_id);
+ CREATE INDEX IF NOT EXISTS origin_id_#{table_name} ON #{table_name}(origin_id);
  SQL
- #index search_term
  self.dbi.do <<-SQL
- CREATE INDEX search_term_#{table_name}
+ CREATE INDEX IF NOT EXISTS search_term_#{table_name}
  ON #{table_name} USING gist(search_term);
  SQL
  #index target_id
  self.dbi.do <<-SQL
- CREATE INDEX target_id_#{table_name} ON #{table_name}(target_id);
+ CREATE INDEX IF NOT EXISTS target_id_#{table_name} ON #{table_name}(target_id);
  SQL
  end
  def create_index(table_name)
  self.dbi.do <<-SQL
- CREATE TABLE #{table_name} (
+ CREATE TABLE IF NOT EXISTS #{table_name} (
  origin_id INTEGER,
  search_term TEXT,
  target_id INTEGER
@@ -188,17 +187,17 @@ CREATE INDEX target_id_#{table_name} ON #{table_name}(target_id);
  SQL
  #index origin_id
  self.dbi.do <<-SQL
- CREATE INDEX origin_id_#{table_name}
+ CREATE INDEX IF NOT EXISTS origin_id_#{table_name}
  ON #{table_name}(origin_id)
  SQL
  #index search_term
  self.dbi.do <<-SQL
- CREATE INDEX search_term_#{table_name}
+ CREATE INDEX IF NOT EXISTS search_term_#{table_name}
  ON #{table_name}(search_term)
  SQL
  #index target_id
  self.dbi.do <<-SQL
- CREATE INDEX target_id_#{table_name}
+ CREATE INDEX IF NOT EXISTS target_id_#{table_name}
  ON #{table_name}(target_id)
  SQL
  end
@@ -267,7 +266,7 @@ CREATE INDEX target_id_#{table_name} ON #{table_name}(target_id);
  def ensure_target_id_index(table_name)
  #index target_id
  self.dbi.do <<-SQL
- CREATE INDEX target_id_#{table_name}
+ CREATE INDEX IF NOT EXISTS target_id_#{table_name}
  ON #{table_name}(target_id)
  SQL
  rescue
@@ -288,6 +287,9 @@ CREATE INDEX target_id_#{table_name} ON #{table_name}(target_id);
  WHERE #{id_name} = ?
  SQL
  end
+ def get_server_version
+ /\s([\d\.]+)\s/.match(self.dbi.select_all("select version();").first.first)[1]
+ end
  def fulltext_index_target_ids(index_name, origin_id)
  sql = <<-SQL
  SELECT DISTINCT target_id
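The new get_server_version helper extracts the numeric version from PostgreSQL's version() banner with a simple regexp. A rough illustration against a typical banner string (the literal below is made up for the example):

```ruby
banner = "PostgreSQL 9.6.1 on x86_64-pc-linux-gnu, compiled by gcc (Gentoo 4.9.3) 4.9.3, 64-bit"
/\s([\d\.]+)\s/.match(banner)[1]   # => "9.6.1"
```

In the method itself, `self.dbi.select_all("select version();").first.first` supplies that banner as the single column of the single returned row.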
@@ -296,17 +298,11 @@ CREATE INDEX target_id_#{table_name} ON #{table_name}(target_id);
  SQL
  self.dbi.select_all(sql, origin_id)
  end
- def generate_dictionary(language, data_path='/usr/share/postgresql/tsearch_data/', file='fulltext')
- found = true
- %w{dict affix stop}.each do |ext|
- filename = "#{language}_#{file}.#{ext}"
- source = File.join(data_path, filename)
- unless File.exists?(source)
- puts "ERROR: \"#{filename}\" does not exist in #{data_path}."
- found = false
- end
- end
- return unless found
+ def generate_dictionary(language)
+ # PostgreSQL searches for the dictionary files in the directory share/tsearch_data of its installation location
+ # By default under Gentoo, this is /usr/share/postgresql/tsearch_data/
+ # As we have no way to get the current installation path, we do not check whether the files are present or not
+ file='fulltext'
  # setup configuration
  self.dbi.do <<-SQL
  CREATE TEXT SEARCH CONFIGURATION public.default_#{language} ( COPY = pg_catalog.#{language} );
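The removed check looked for `<language>_fulltext.dict`, `.affix` and `.stop` under a hard-coded /usr/share/postgresql/tsearch_data/; since the server's actual share directory is not known from the client side, generate_dictionary now simply issues the DDL and lets PostgreSQL complain if the files are missing (the server resolves them relative to `$(pg_config --sharedir)/tsearch_data`). For reference, an ispell dictionary backed by such files would be declared roughly as below; this is a sketch, not the statements the method actually issues after the configuration shown above:

```ruby
# Sketch only (German as an example language); the dictionary name is illustrative.
self.dbi.do <<-SQL
  CREATE TEXT SEARCH DICTIONARY public.german_fulltext (
    TEMPLATE  = ispell,
    DictFile  = german_fulltext,
    AffFile   = german_fulltext,
    StopWords = german_fulltext
  );
SQL
```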
@@ -526,7 +522,7 @@ CREATE INDEX target_id_#{table_name} ON #{table_name}(target_id);
  unless(self.dbi.columns('object').any? { |col| col.name == 'extent' })
  self.dbi.do <<-EOS
  ALTER TABLE object ADD COLUMN extent TEXT;
- CREATE INDEX extent_index ON object(extent);
+ CREATE INDEX IF NOT EXISTS extent_index ON object(extent);
  EOS
  end
  end
@@ -1,5 +1,5 @@
  #!/usr/bin/env ruby

  class Odba
- VERSION = '1.1.2'
+ VERSION = '1.1.3'
  end
@@ -18,11 +18,11 @@ Gem::Specification.new do |spec|

  spec.add_dependency 'ydbi', '>=0.5.1'
  spec.add_dependency 'ydbd-pg','>=0.5.1'
-
+
  spec.add_development_dependency "bundler"
  spec.add_development_dependency "rake"
  spec.add_development_dependency "flexmock"
- spec.add_development_dependency "simplecov"
+ spec.add_development_dependency "simplecov", '>= 0.14.1'
  spec.add_development_dependency "minitest" if /^1\./.match(RUBY_VERSION)
  end

@@ -16,7 +16,7 @@ require 'odba/odba_error'
  require 'odba/odba'

  module ODBA
- class Cache
+ class Cache
  CLEANING_INTERVAL = 0
  MAIL_RECIPIENTS = []
  MAIL_FROM = "test@testfirst.local"
@@ -216,7 +216,7 @@ module ODBA
  @marshal.should_receive(:load).and_return {|dump|
  receiver
  }
- @storage.should_receive(:restore_collection).and_return {|*args|
+ @storage.should_receive(:restore_collection).and_return {|*args|
  []
  }
  receiver.instance_variable_set("@odba_id", 23)
@@ -233,7 +233,7 @@ module ODBA
  def test_fetch_error
  receiver = flexmock
  @storage.should_receive(:restore).and_return { |odba_id|
- nil
+ nil
  }
  assert_raises(OdbaError) {
  @cache.load_object(23, receiver)
@@ -289,17 +289,17 @@ module ODBA
  assert_equal(new_collection, col)
  }

- @storage.should_receive(:restore_collection).and_return {
+ @storage.should_receive(:restore_collection).and_return {
  old_collection.collect { |key, val|
- [Marshal.dump(key.odba_isolated_stub),
+ [Marshal.dump(key.odba_isolated_stub),
  Marshal.dump(val.odba_isolated_stub)]
  }
  }
- @storage.should_receive(:collection_remove).and_return { |odba_id, key|
+ @storage.should_receive(:collection_remove).and_return { |odba_id, key|
  assert_equal(54, odba_id)
  assert_equal(Marshal.dump('key1'.odba_isolated_stub), key)
  }
- @storage.should_receive(:collection_store).and_return { |odba_id, key, value|
+ @storage.should_receive(:collection_store).and_return { |odba_id, key, value|
  assert_equal(54, odba_id)
  assert_equal(Marshal.dump('key3'.odba_isolated_stub), key)
  assert_equal(Marshal.dump('val3'.odba_isolated_stub), value)
@@ -308,7 +308,7 @@ module ODBA
  @cache.fetched = {
  54 => cache_entry
  }
-
+
  obj = flexmock('Obj')
  obj.should_receive(:odba_id).and_return { 54 }
  obj.should_receive(:odba_collection).and_return { new_collection }
@@ -349,10 +349,10 @@ module ODBA
  end
  def test_fill_index
  foo = flexmock("foo")
- foo.should_receive(:fill).and_return { |target|
+ foo.should_receive(:fill).and_return { |target|
  assert_equal("baz", target)
  }
- @cache.indices = {
+ @cache.indices = {
  "foo" => foo
  }
  @cache.fill_index("foo", "baz")
@@ -436,9 +436,9 @@ module ODBA

  @storage.should_receive(:restore_collection).and_return { [] }
  if(block)
- @storage.should_receive(:store, &block).and_return
+ @storage.should_receive(:store, &block).and_return
  else
- @storage.should_receive(:store).and_return {
+ @storage.should_receive(:store).and_return {
  assert(true)
  }
  end
@@ -453,39 +453,39 @@ module ODBA
  origin_obj.odba_connection = delete_item
  @cache.fetched.store(1, delete_item)
  @storage.should_receive(:retrieve_connected_objects).and_return { |id|
- [[2]]
+ [[2]]
  }
  prepare_fetch(2, origin_obj)
- @storage.should_receive(:restore_collection).and_return { |*args|
+ @storage.should_receive(:restore_collection).and_return { |*args|
  []
  }
  @storage.should_receive(:store).and_return { |id, dump, name, prefetch, klass| }
- @storage.should_receive(:ensure_object_connections).and_return { }
- @storage.should_receive(:delete_persistable).and_return { |id| }
+ @storage.should_receive(:ensure_object_connections).and_return { }
+ @storage.should_receive(:delete_persistable).and_return { |id| }
  @marshal.should_receive(:dump).and_return { |ob| "foo"}
  @cache.delete(delete_item)
  assert_equal(1, @cache.fetched.size)
- assert_equal(nil, origin_obj.odba_connection)
+ assert_nil(origin_obj.odba_connection)
  end
  def prepare_delete(mock, name, id)
  mock.should_receive(:odba_id).and_return { id }
  mock.should_receive(:odba_name).and_return { name }
  mock.should_receive(:odba_notify_observers).and_return { |key, id1, id2|
- assert_equal(:delete, key)
+ assert_equal(:delete, key)
  }
  @storage.should_receive(:retrieve_connected_objects).and_return { |id|
  []
  }
  mock.should_receive(:origin_class?).and_return { true }
  mock.should_receive(:odba_id).and_return { id }
- @storage.should_receive(:delete_persistable).and_return { |id_arg|
+ @storage.should_receive(:delete_persistable).and_return { |id_arg|
  assert_equal(id, id_arg)
  }
  @storage.should_receive(:delete_index_element).and_return { }
  end
  def prepare_bulk_restore(rows)
  rows.each { |odba_mock|
- ## according to recent changes, objects are extended with
+ ## according to recent changes, objects are extended with
  # ODBA::Persistable after loading - this enables ad-hoc storing
  # but messes up loads of tests
  @marshal.should_receive(:load).and_return { |dump|
@@ -591,7 +591,7 @@ module ODBA
  ## store o1
  @marshal.should_receive(:dump).times(3).and_return { |obj|
  "dump%i" % obj.odba_id
- }
+ }
  next_id = 1
  @storage.should_receive(:next_id).and_return { next_id += 1 }
  @storage.should_receive(:store).with(1,'dump1',nil,nil,Object)\
@@ -631,7 +631,7 @@ module ODBA
  .times(1).and_return(o4)
  @cache.fetched.store(1, ODBA::CacheEntry.new(o1))
  assert_raises(RuntimeError) {
- ODBA.transaction {
+ ODBA.transaction {
  o2.instance_variable_set('@other', o3)
  o1.instance_variable_set('@other', o2)
  o1.odba_store
@@ -30,7 +30,7 @@ module ODBA
  class ODBAContainerInPersistable
  include ODBA::Persistable
  ODBA_SERIALIZABLE = ['@serializable']
- attr_accessor :non_replaceable, :replaceable, :replaceable2,
+ attr_accessor :non_replaceable, :replaceable, :replaceable2,
  :array, :odba_persistent, :serializable
  attr_accessor :odba_snapshot_level
  end
@@ -181,11 +181,11 @@ module ODBA
  level1.replaceable = level2

  saved.odba_persistent = true
- ODBA.cache.should_receive(:store).times(3).and_return {
+ ODBA.cache.should_receive(:store).times(3).and_return {
  assert(true)
  2
  }
-
+
  @odba.odba_store_unsaved
  end
  def test_odba_store_unsaved_hash
@@ -196,12 +196,12 @@ module ODBA
  level1.replaceable = hash
  level1.non_replaceable = non_rep_hash
  non_rep_hash.odba_persistent = true
-
- ODBA.cache.should_receive(:store).times(2).and_return {
+
+ ODBA.cache.should_receive(:store).times(2).and_return {
  assert(true)
  2
  }
-
+
  level1.odba_store_unsaved
  end
  def test_dup
@@ -225,8 +225,8 @@ module ODBA
  odba_twin = @odba.odba_dup
  odba_twin.replaceable.flexmock_verify
  odba_twin.replaceable2.flexmock_verify
- assert_equal(odba_twin, stub_container)
- assert_equal(odba_twin, stub_container2)
+ assert_equal(odba_twin, stub_container)
+ assert_equal(odba_twin, stub_container2)
  end
  def test_odba_unsaved_true
  @odba.instance_variable_set("@odba_persistent", false)
@@ -282,7 +282,7 @@ module ODBA
  ODBA.cache.should_receive(:next_id).and_return(1)
  dump, hash = odba.odba_isolated_dump
  obj = ODBA.marshaller.load(dump)
- assert_equal(nil, obj.excluded)
+ assert_nil(obj.excluded)
  assert_equal("baz", obj.included)
  ODBA.marshaller = tmp
  end
@@ -385,7 +385,7 @@ module ODBA
  }
  ODBA.cache.should_receive(:retrieve_from_index).with(name, args)\
  .times(1).and_return([result])
- assert_equal([result],
+ assert_equal([result],
  IndexedStub.search_by_foo_and_bar('oof', 'rab'))

  ## exact search by multiple keys
@@ -393,11 +393,11 @@ module ODBA
  ODBA.cache.should_receive(:retrieve_from_index)\
  .with(name, args, Persistable::Exact)\
  .times(1).and_return([result])
- assert_equal([result],
+ assert_equal([result],
  IndexedStub.search_by_exact_foo_and_bar('oof', 'rab'))

  ## find by multiple keys
- args = {:foo => {'value' => 7,'condition' => '='},
+ args = {:foo => {'value' => 7,'condition' => '='},
  :bar => {'value' => 'rab','condition' => 'like'}}
  ODBA.cache.should_receive(:retrieve_from_index)\
  .with(name, args, Persistable::Find)\
@@ -497,7 +497,7 @@ module ODBA

  modified.instance_variable_set('@data', 'bar')
  assert_equal('bar', modified.instance_variable_get('@data'))
-
+
  modified.odba_replace!(reloaded)
  assert_equal('foo', modified.instance_variable_get('@data'))
  end
@@ -553,5 +553,5 @@ module ODBA
  o = ODBAContainerInPersistable.new
  assert_equal([], o.odba_collection)
  end
- end
+ end
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: odba
  version: !ruby/object:Gem::Version
- version: 1.1.2
+ version: 1.1.3
  platform: ruby
  authors:
  - Masaomi Hatakeyama, Zeno R.R. Davatz
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2016-05-10 00:00:00.000000000 Z
+ date: 2017-12-12 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: ydbi
@@ -86,14 +86,14 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: '0'
+ version: 0.14.1
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: '0'
+ version: 0.14.1
  description: Object Database Access
  email: mhatakeyama@ywesee.com, zdavatz@ywesee.com
  executables: []
@@ -165,9 +165,8 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  version: '0'
  requirements: []
  rubyforge_project:
- rubygems_version: 2.4.5
+ rubygems_version: 2.6.8
  signing_key:
  specification_version: 4
  summary: Ruby Software for ODDB.org Memory Management
  test_files: []
- has_rdoc: