sequel 2.12.0 → 3.0.0

Files changed (91)
  1. data/CHANGELOG +62 -0
  2. data/README.rdoc +3 -3
  3. data/Rakefile +7 -0
  4. data/doc/advanced_associations.rdoc +44 -0
  5. data/doc/release_notes/3.0.0.txt +221 -0
  6. data/lib/sequel/adapters/amalgalite.rb +208 -0
  7. data/lib/sequel/adapters/db2.rb +3 -0
  8. data/lib/sequel/adapters/dbi.rb +9 -0
  9. data/lib/sequel/adapters/do.rb +0 -4
  10. data/lib/sequel/adapters/firebird.rb +16 -18
  11. data/lib/sequel/adapters/informix.rb +5 -3
  12. data/lib/sequel/adapters/jdbc.rb +24 -20
  13. data/lib/sequel/adapters/jdbc/h2.rb +15 -4
  14. data/lib/sequel/adapters/mysql.rb +4 -8
  15. data/lib/sequel/adapters/odbc.rb +0 -4
  16. data/lib/sequel/adapters/oracle.rb +0 -4
  17. data/lib/sequel/adapters/shared/mssql.rb +16 -5
  18. data/lib/sequel/adapters/shared/mysql.rb +87 -86
  19. data/lib/sequel/adapters/shared/oracle.rb +92 -3
  20. data/lib/sequel/adapters/shared/postgres.rb +85 -29
  21. data/lib/sequel/adapters/shared/progress.rb +8 -3
  22. data/lib/sequel/adapters/shared/sqlite.rb +53 -23
  23. data/lib/sequel/adapters/sqlite.rb +4 -7
  24. data/lib/sequel/adapters/utils/unsupported.rb +3 -3
  25. data/lib/sequel/connection_pool.rb +18 -25
  26. data/lib/sequel/core.rb +2 -21
  27. data/lib/sequel/database.rb +60 -44
  28. data/lib/sequel/database/schema_generator.rb +26 -31
  29. data/lib/sequel/database/schema_methods.rb +8 -3
  30. data/lib/sequel/database/schema_sql.rb +114 -28
  31. data/lib/sequel/dataset.rb +14 -41
  32. data/lib/sequel/dataset/convenience.rb +31 -54
  33. data/lib/sequel/dataset/graph.rb +7 -13
  34. data/lib/sequel/dataset/sql.rb +43 -54
  35. data/lib/sequel/extensions/inflector.rb +0 -5
  36. data/lib/sequel/extensions/schema_dumper.rb +238 -0
  37. data/lib/sequel/metaprogramming.rb +0 -20
  38. data/lib/sequel/model.rb +1 -2
  39. data/lib/sequel/model/base.rb +18 -16
  40. data/lib/sequel/model/inflections.rb +6 -9
  41. data/lib/sequel/plugins/caching.rb +0 -6
  42. data/lib/sequel/plugins/hook_class_methods.rb +1 -1
  43. data/lib/sequel/sql.rb +2 -0
  44. data/lib/sequel/version.rb +2 -2
  45. data/spec/adapters/firebird_spec.rb +35 -8
  46. data/spec/adapters/mysql_spec.rb +173 -266
  47. data/spec/adapters/oracle_spec.rb +13 -0
  48. data/spec/adapters/postgres_spec.rb +127 -227
  49. data/spec/adapters/sqlite_spec.rb +13 -171
  50. data/spec/core/connection_pool_spec.rb +15 -4
  51. data/spec/core/core_sql_spec.rb +14 -170
  52. data/spec/core/database_spec.rb +50 -132
  53. data/spec/core/dataset_spec.rb +47 -930
  54. data/spec/core/expression_filters_spec.rb +12 -0
  55. data/spec/core/schema_generator_spec.rb +37 -45
  56. data/spec/core/schema_spec.rb +26 -16
  57. data/spec/core/spec_helper.rb +0 -25
  58. data/spec/extensions/inflector_spec.rb +0 -3
  59. data/spec/extensions/schema_dumper_spec.rb +292 -0
  60. data/spec/extensions/serialization_spec.rb +9 -0
  61. data/spec/extensions/single_table_inheritance_spec.rb +6 -1
  62. data/spec/extensions/spec_helper.rb +1 -3
  63. data/spec/extensions/validation_helpers_spec.rb +4 -4
  64. data/spec/integration/database_test.rb +18 -0
  65. data/spec/integration/dataset_test.rb +112 -1
  66. data/spec/integration/eager_loader_test.rb +70 -9
  67. data/spec/integration/prepared_statement_test.rb +2 -2
  68. data/spec/integration/schema_test.rb +76 -27
  69. data/spec/integration/spec_helper.rb +0 -14
  70. data/spec/integration/transaction_test.rb +27 -0
  71. data/spec/model/associations_spec.rb +0 -36
  72. data/spec/model/base_spec.rb +18 -123
  73. data/spec/model/hooks_spec.rb +2 -235
  74. data/spec/model/inflector_spec.rb +15 -115
  75. data/spec/model/model_spec.rb +0 -120
  76. data/spec/model/plugins_spec.rb +0 -70
  77. data/spec/model/record_spec.rb +35 -93
  78. data/spec/model/spec_helper.rb +0 -27
  79. data/spec/model/validations_spec.rb +0 -931
  80. metadata +9 -14
  81. data/lib/sequel/deprecated.rb +0 -593
  82. data/lib/sequel/deprecated_migration.rb +0 -91
  83. data/lib/sequel/model/deprecated.rb +0 -204
  84. data/lib/sequel/model/deprecated_hooks.rb +0 -103
  85. data/lib/sequel/model/deprecated_inflector.rb +0 -335
  86. data/lib/sequel/model/deprecated_validations.rb +0 -388
  87. data/spec/core/core_ext_spec.rb +0 -156
  88. data/spec/core/migration_spec.rb +0 -263
  89. data/spec/core/pretty_table_spec.rb +0 -58
  90. data/spec/model/caching_spec.rb +0 -217
  91. data/spec/model/schema_spec.rb +0 -92
data/lib/sequel/adapters/shared/oracle.rb

@@ -3,6 +3,34 @@ Sequel.require %w'date_format unsupported', 'adapters/utils'
 module Sequel
   module Oracle
     module DatabaseMethods
+      TEMPORARY = 'GLOBAL TEMPORARY '.freeze
+      AUTOINCREMENT = ''.freeze
+
+      def create_sequence(name, opts={})
+        self << create_sequence_sql(name, opts)
+      end
+
+      def create_table(name, options={}, &block)
+        options = {:generator=>options} if options.is_a?(Schema::Generator)
+        generator = options[:generator] || Schema::Generator.new(self, &block)
+        drop_statement, create_statements = create_table_sql_list(name, generator, options)
+        (execute_ddl(drop_statement) rescue nil) if drop_statement
+        (create_statements + index_sql_list(name, generator.indexes)).each{|sql| execute_ddl(sql)}
+      end
+
+      def create_trigger(*args)
+        self << create_trigger_sql(*args)
+      end
+
+      def drop_sequence(name)
+        self << drop_sequence_sql(name)
+      end
+
+      # Oracle uses the :oracle database type
+      def database_type
+        :oracle
+      end
+
       def tables(opts={})
         ds = from(:tab).server(opts[:server]).select(:tname).filter(:tabtype => 'TABLE')
         ds.map{|r| ds.send(:output_identifier, r[:tname])}
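Reviewer note: a quick sketch of how the new public Database methods above are called from user code. The connection URL and sequence name are made up for illustration; only database_type, create_sequence, and drop_sequence are taken from the diff.

  require 'sequel'
  DB = Sequel.connect('oracle://user:password@host/db')      # hypothetical connection string
  DB.database_type                                           # => :oracle
  DB.create_sequence(:seq_invoices_id, :start_with => 1000)  # CREATE SEQUENCE ... start with 1000 increment by 1 nomaxvalue
  DB.drop_sequence(:seq_invoices_id)                         # DROP SEQUENCE ...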
@@ -11,6 +39,62 @@ module Sequel
       def table_exists?(name)
         from(:tab).filter(:tname =>dataset.send(:input_identifier, name), :tabtype => 'TABLE').count > 0
       end
+
+      private
+
+      def auto_increment_sql
+        AUTOINCREMENT
+      end
+
+      # SQL fragment for showing a table is temporary
+      def temporary_table_sql
+        TEMPORARY
+      end
+
+      def create_sequence_sql(name, opts={})
+        "CREATE SEQUENCE #{quote_identifier(name)} start with #{opts[:start_with]||1} increment by #{opts[:increment_by]||1} nomaxvalue"
+      end
+
+      def create_table_sql_list(name, generator, options={})
+        statements = [create_table_sql(name, generator, options)]
+        drop_seq_statement = nil
+        generator.columns.each do |c|
+          if c[:auto_increment]
+            c[:sequence_name] ||= "seq_#{name}_#{c[:name]}"
+            unless c[:create_sequence] == false
+              drop_seq_statement = drop_sequence_sql(c[:sequence_name])
+              statements << create_sequence_sql(c[:sequence_name], c)
+            end
+            unless c[:create_trigger] == false
+              c[:trigger_name] ||= "BI_#{name}_#{c[:name]}"
+              trigger_definition = <<-end_sql
+                BEGIN
+                  IF :NEW.#{quote_identifier(c[:name])} IS NULL THEN
+                    SELECT #{c[:sequence_name]}.nextval INTO :NEW.#{quote_identifier(c[:name])} FROM dual;
+                  END IF;
+                END;
+              end_sql
+              statements << create_trigger_sql(name, c[:trigger_name], trigger_definition, {:events => [:insert]})
+            end
+          end
+        end
+        [drop_seq_statement, statements]
+      end
+
+      def create_trigger_sql(table, name, definition, opts={})
+        events = opts[:events] ? Array(opts[:events]) : [:insert, :update, :delete]
+        sql = <<-end_sql
+          CREATE#{' OR REPLACE' if opts[:replace]} TRIGGER #{quote_identifier(name)}
+          #{opts[:after] ? 'AFTER' : 'BEFORE'} #{events.map{|e| e.to_s.upcase}.join(' OR ')} ON #{quote_schema_table(table)}
+          REFERENCING NEW AS NEW FOR EACH ROW
+          #{definition}
+        end_sql
+        sql
+      end
+
+      def drop_sequence_sql(name)
+        "DROP SEQUENCE #{quote_identifier(name)}"
+      end
     end
 
     module DatasetMethods
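Reviewer note: with create_table_sql_list above, an Oracle create_table that declares an auto-incrementing primary key now also emits a sequence and a BEFORE INSERT trigger. A minimal, hedged sketch of the user-facing call (table and column names are made up; the generated names follow the seq_/BI_ conventions in the code):

  DB.create_table(:albums) do
    primary_key :id   # :auto_increment column, so Sequel also runs
    String :name      # CREATE SEQUENCE seq_albums_id and CREATE TRIGGER BI_albums_id
  end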
@@ -43,14 +127,19 @@ module Sequel
         "#{expression} #{quote_identifier(aliaz)}"
       end
 
+      # Oracle uses the SQL standard of only doubling ' inside strings.
+      def literal_string(v)
+        "'#{v.gsub("'", "''")}'"
+      end
+
       def select_clause_order
         SELECT_CLAUSE_ORDER
       end
 
       # Oracle requires a subselect to do limit and offset
-      def select_limit_sql(sql, opts)
-        if limit = opts[:limit]
-          if (offset = opts[:offset]) && (offset > 0)
+      def select_limit_sql(sql)
+        if limit = @opts[:limit]
+          if (offset = @opts[:offset]) && (offset > 0)
             sql.replace("SELECT * FROM (SELECT raw_sql_.*, ROWNUM raw_rnum_ FROM(#{sql}) raw_sql_ WHERE ROWNUM <= #{limit + offset}) WHERE raw_rnum_ > #{offset}")
           else
             sql.replace("SELECT * FROM (#{sql}) WHERE ROWNUM <= #{limit}")
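Reviewer note: select_limit_sql (like the other select_*_sql hooks further down) now reads the dataset's own @opts instead of taking an opts argument. The ROWNUM wrapping itself is unchanged; roughly, and ignoring identifier quoting:

  DB[:albums].limit(10, 20).sql
  # => SELECT * FROM (SELECT raw_sql_.*, ROWNUM raw_rnum_ FROM(
  #      SELECT * FROM albums) raw_sql_ WHERE ROWNUM <= 30) WHERE raw_rnum_ > 20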
data/lib/sequel/adapters/shared/postgres.rb

@@ -56,12 +56,12 @@ module Sequel
 
       SELECT_CURRVAL = "SELECT currval('%s')".freeze
       SELECT_CUSTOM_SEQUENCE = proc do |schema, table| <<-end_sql
-        SELECT '"' || name.nspname || '"."' || CASE
+        SELECT '"' || name.nspname || '".' || CASE
           WHEN split_part(def.adsrc, '''', 2) ~ '.' THEN
             substr(split_part(def.adsrc, '''', 2),
                    strpos(split_part(def.adsrc, '''', 2), '.')+1)
           ELSE split_part(def.adsrc, '''', 2)
-        END || '"'
+        END
         FROM pg_class t
         JOIN pg_namespace name ON (t.relnamespace = name.oid)
         JOIN pg_attribute attr ON (t.oid = attrelid)
@@ -86,7 +86,7 @@ module Sequel
       end_sql
       end
       SELECT_SERIAL_SEQUENCE = proc do |schema, table| <<-end_sql
-        SELECT '"' || name.nspname || '"."' || seq.relname || '"'
+        SELECT '"' || name.nspname || '".' || seq.relname || ''
         FROM pg_class seq, pg_attribute attr, pg_depend dep,
           pg_namespace name, pg_constraint cons
         WHERE seq.oid = dep.objid
@@ -172,7 +172,6 @@ module Sequel
       SQL_ROLLBACK = 'ROLLBACK'.freeze
       SQL_RELEASE_SAVEPOINT = 'RELEASE SAVEPOINT autopoint_%d'.freeze
       SYSTEM_TABLE_REGEXP = /^pg|sql/.freeze
-      TYPES = Sequel::Database::TYPES.merge(File=>'bytea', String=>'text')
 
       # Creates the function in the database. Arguments:
       # * name : name of the function to create
@@ -222,6 +221,11 @@ module Sequel
         self << create_trigger_sql(table, name, function, opts)
       end
 
+      # PostgreSQL uses the :postgres database type.
+      def database_type
+        :postgres
+      end
+
       # Drops the function from the database. Arguments:
       # * name : name of the function to drop
       # * opts : options hash:
@@ -261,6 +265,34 @@ module Sequel
         self << drop_trigger_sql(table, name, opts)
       end
 
+      # Return a hash containing index information. Hash keys are index name symbols.
+      # Values are subhashes with two keys, :columns and :unique. The value of :columns
+      # is an array of symbols of column names. The value of :unique is true or false
+      # depending on if the index is unique.
+      def indexes(table)
+        m = output_identifier_meth
+        im = input_identifier_meth
+        schema, table = schema_and_table(table)
+        ds = metadata_dataset.
+          from(:pg_class___tab).
+          join(:pg_index___ind, :indrelid=>:oid, im.call(table)=>:relname).
+          join(:pg_class___indc, :oid=>:indexrelid).
+          join(:pg_attribute___att, :attrelid=>:tab__oid, :attnum=>SQL::Function.new(:ANY, :ind__indkey)).
+          filter(:indc__relkind=>'i', :ind__indisprimary=>false).
+          exclude(0=>SQL::Function.new(:ANY, :ind__indkey)).
+          order(:indc__relname, (0...32).map{|x| [SQL::Subscript.new(:ind__indkey, [x]), x]}.case(32, :att__attnum)).
+          select(:indc__relname___name, :ind__indisunique___unique, :att__attname___column)
+
+        ds.join!(:pg_namespace___nsp, :oid=>:tab__relnamespace, :nspname=>schema) if schema
+
+        indexes = {}
+        ds.each do |r|
+          i = indexes[m.call(r[:name])] ||= {:columns=>[], :unique=>r[:unique]}
+          i[:columns] << m.call(r[:column])
+        end
+        indexes
+      end
+
       # Dataset containing all current database locks
       def locks
         dataset.from(:pg_class).join(:pg_locks, :relation=>:relfilenode).select(:pg_class__relname, Sequel::SQL::ColumnAll.new(:pg_locks))
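Reviewer note: the return shape of Database#indexes matches the comment above. Roughly, with made-up table and index names (assuming DB is a connected Sequel::Database):

  DB.indexes(:albums)
  # => {:albums_name_index           => {:columns=>[:name], :unique=>false},
  #     :albums_artist_id_name_index => {:columns=>[:artist_id, :name], :unique=>true}}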
@@ -287,6 +319,16 @@ module Sequel
           synchronize(opts[:server]){|con| con.sequence(*schema_and_table(table))}
         end
       end
+
+      # Reset the primary key sequence for the given table, baseing it on the
+      # maximum current value of the table's primary key.
+      def reset_primary_key_sequence(table)
+        pk = SQL::Identifier.new(primary_key(table))
+        seq = primary_key_sequence(table)
+        db = self
+        seq_ds = db.from(seq.lit)
+        get{setval(seq, db[table].select{coalesce(max(pk)+seq_ds.select{:increment_by}, seq_ds.select(:min_value))}, false)}
+      end
 
       # PostgreSQL uses SERIAL psuedo-type instead of AUTOINCREMENT for
       # managing incrementing primary keys.
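Reviewer note: reset_primary_key_sequence is for use after rows have been inserted with explicit primary key values (for example by a bulk load), so that the sequence hands out values past the current maximum. A hedged sketch with made-up data:

  DB[:albums].insert(:id => 1000, :name => 'RF')  # explicit id bypasses the sequence
  DB.reset_primary_key_sequence(:albums)          # setval(...) based on MAX(id)
  DB[:albums].insert(:name => 'MO')               # now gets id 1001 from the sequence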
@@ -313,9 +355,10 @@ module Sequel
       # * :schema - The schema to search (default_schema by default)
       # * :server - The server to use
       def table_exists?(table, opts={})
+        im = input_identifier_meth
         schema, table = schema_and_table(table)
         opts[:schema] ||= schema
-        tables(opts){|ds| !ds.first(:relname=>ds.send(:input_identifier, table)).nil?}
+        tables(opts){|ds| !ds.first(:relname=>im.call(table)).nil?}
       end
 
       # Array of symbols specifying table names in the current database.
@@ -326,22 +369,16 @@ module Sequel
       # * :schema - The schema to search (default_schema by default)
       # * :server - The server to use
       def tables(opts={})
-        ds = self[:pg_class].filter(:relkind=>'r').select(:relname).exclude(SQL::StringExpression.like(:relname, SYSTEM_TABLE_REGEXP)).server(opts[:server])
+        ds = metadata_dataset.from(:pg_class).filter(:relkind=>'r').select(:relname).exclude(SQL::StringExpression.like(:relname, SYSTEM_TABLE_REGEXP)).server(opts[:server])
         ds.join!(:pg_namespace, :oid=>:relnamespace, :nspname=>(opts[:schema]||default_schema).to_s) if opts[:schema] || default_schema
-        ds.identifier_input_method = nil
-        ds.identifier_output_method = nil
-        ds2 = dataset
-        block_given? ? yield(ds) : ds.map{|r| ds2.send(:output_identifier, r[:relname])}
+        m = output_identifier_meth
+        block_given? ? yield(ds) : ds.map{|r| m.call(r[:relname])}
       end
 
       # PostgreSQL supports multi-level transactions using save points.
       # To use a savepoint instead of reusing the current transaction,
       # use the :savepoint=>true option.
       def transaction(opts={})
-        unless opts.is_a?(Hash)
-          Deprecation.deprecate('Passing an argument other than a Hash to Database#transaction', "Use DB.transaction(:server=>#{opts.inspect})")
-          opts = {:server=>opts}
-        end
         synchronize(opts[:server]) do |conn|
           return yield(conn) if @transactions.include?(Thread.current) and !opts[:savepoint]
           conn.transaction_depth ||= 0
@@ -388,7 +425,7 @@ module Sequel
       end
 
       private
-
+
       # SQL statement to create database function.
       def create_function_sql(name, definition, opts={})
         args = opts[:args]
@@ -511,10 +548,17 @@ module Sequel
         "ALTER TABLE #{quote_schema_table(name)} RENAME TO #{quote_identifier(schema_and_table(new_name).last)}"
       end
 
+      # PostgreSQL's autoincrementing primary keys are of type integer or bigint
+      # using a nextval function call as a default.
+      def schema_autoincrementing_primary_key?(schema)
+        super and schema[:db_type] =~ /\A(?:integer|bigint)\z/io and schema[:default]=~/\Anextval/io
+      end
+
       # The dataset used for parsing table schemas, using the pg_* system catalogs.
       def schema_parse_table(table_name, opts)
-        ds2 = dataset
-        ds = dataset.select(:pg_attribute__attname___name,
+        m = output_identifier_meth
+        m2 = input_identifier_meth
+        ds = metadata_dataset.select(:pg_attribute__attname___name,
           SQL::Function.new(:format_type, :pg_type__oid, :pg_attribute__atttypmod).as(:db_type),
           SQL::Function.new(:pg_get_expr, :pg_attrdef__adbin, :pg_class__oid).as(:default),
           SQL::BooleanExpression.new(:NOT, :pg_attribute__attnotnull).as(:allow_null),
@@ -526,15 +570,13 @@ module Sequel
           left_outer_join(:pg_index, :indrelid=>:pg_class__oid, :indisprimary=>true).
           filter(:pg_attribute__attisdropped=>false).
           filter{|o| o.pg_attribute__attnum > 0}.
-          filter(:pg_class__relname=>ds2.send(:input_identifier, table_name)).
+          filter(:pg_class__relname=>m2.call(table_name)).
           order(:pg_attribute__attnum)
         ds.join!(:pg_namespace, :oid=>:pg_class__relnamespace, :nspname=>(opts[:schema] || default_schema).to_s) if opts[:schema] || default_schema
-        ds.identifier_input_method = nil
-        ds.identifier_output_method = nil
         ds.map do |row|
           row[:default] = nil if blank_object?(row[:default])
           row[:type] = schema_column_type(row[:db_type])
-          [ds2.send(:output_identifier, row.delete(:name)), row]
+          [m.call(row.delete(:name)), row]
         end
       end
 
@@ -543,9 +585,23 @@ module Sequel
         "(#{Array(args).map{|a| Array(a).reverse.join(' ')}.join(', ')})"
       end
 
-      # Override the standard type conversions with PostgreSQL specific ones
-      def type_literal_base(column)
-        TYPES[column[:type]]
+      # PostgreSQL uses the bytea data type for blobs
+      def type_literal_generic_file(column)
+        :bytea
+      end
+
+      # PostgreSQL prefers the text datatype. If a fixed size is requested,
+      # the char type is used. If the text type is specifically
+      # disallowed or there is a size specified, use the varchar type.
+      # Otherwise use the type type.
+      def type_literal_generic_string(column)
+        if column[:fixed]
+          "char(#{column[:size]||255})"
+        elsif column[:text] == false or column[:size]
+          "varchar(#{column[:size]||255})"
+        else
+          :text
+        end
       end
     end
 
@@ -712,9 +768,9 @@ module Sequel
 
       # PostgreSQL is smart and can use parantheses around all datasets to get
       # the correct answers.
-      def select_compounds_sql(sql, opts)
-        return unless opts[:compounds]
-        opts[:compounds].each do |type, dataset, all|
+      def select_compounds_sql(sql)
+        return unless @opts[:compounds]
+        @opts[:compounds].each do |type, dataset, all|
           sql.replace("(#{sql} #{type.to_s.upcase}#{' ALL' if all} #{subselect_sql(dataset)})")
         end
       end
@@ -725,8 +781,8 @@ module Sequel
       end
 
       # Support lock mode, allowing FOR SHARE and FOR UPDATE queries.
-      def select_lock_sql(sql, opts)
-        case opts[:lock]
+      def select_lock_sql(sql)
+        case @opts[:lock]
         when :update
           sql << FOR_UPDATE
         when :share
data/lib/sequel/adapters/shared/progress.rb

@@ -4,6 +4,11 @@ module Sequel
   module Progress
     module DatabaseMethods
 
+      # Progress uses the :progress database type.
+      def database_type
+        :progress
+      end
+
       def dataset(opts = nil)
         ds = super
         ds.extend(DatasetMethods)
@@ -25,9 +30,9 @@ module Sequel
 
      # Progress uses TOP for limit, but it is only supported in Progress 10.
      # The Progress adapter targets Progress 9, so it silently ignores the option.
-      def select_limit_sql(sql, opts)
-        raise(Error, "OFFSET not supported") if opts[:offset]
-        #sql << " TOP #{opts[:limit]}" if opts[:limit]
+      def select_limit_sql(sql)
+        raise(Error, "OFFSET not supported") if @opts[:offset]
+        #sql << " TOP #{@opts[:limit]}" if @opts[:limit]
       end
     end
   end
data/lib/sequel/adapters/shared/sqlite.rb

@@ -4,10 +4,10 @@ module Sequel
   module SQLite
     module DatabaseMethods
       AUTO_VACUUM = [:none, :full, :incremental].freeze
+      PRIMARY_KEY_INDEX_RE = /\Asqlite_autoindex_/.freeze
       SYNCHRONOUS = [:off, :normal, :full].freeze
       TABLES_FILTER = "type = 'table' AND NOT name = 'sqlite_sequence'"
       TEMP_STORE = [:default, :file, :memory].freeze
-      TYPES = Sequel::Database::TYPES.merge(Bignum=>'integer')
 
       # Run all alter_table commands in a transaction. This is technically only
       # needed for drop column.
@@ -27,6 +27,34 @@ module Sequel
         pragma_set(:auto_vacuum, value)
       end
 
+      # SQLite uses the :sqlite database type.
+      def database_type
+        :sqlite
+      end
+
+      # Return a hash containing index information. Hash keys are index name symbols.
+      # Values are subhashes with two keys, :columns and :unique. The value of :columns
+      # is an array of symbols of column names. The value of :unique is true or false
+      # depending on if the index is unique.
+      def indexes(table)
+        m = output_identifier_meth
+        im = input_identifier_meth
+        indexes = {}
+        begin
+          metadata_dataset.with_sql("PRAGMA index_list(?)", im.call(table)).each do |r|
+            next if r[:name] =~ PRIMARY_KEY_INDEX_RE
+            indexes[m.call(r[:name])] = {:unique=>r[:unique].to_i==1}
+          end
+        rescue Sequel::DatabaseError
+          nil
+        else
+          indexes.each do |k, v|
+            v[:columns] = metadata_dataset.with_sql("PRAGMA index_info(?)", im.call(k)).map(:name).map{|x| m.call(x)}
+          end
+        end
+        indexes
+      end
+
       # Get the value of the given PRAGMA.
       def pragma_get(name)
         self["PRAGMA #{name}"].single_value
@@ -53,11 +81,8 @@ module Sequel
       # Options:
       # * :server - Set the server to use.
       def tables(opts={})
-        ds = self[:sqlite_master].server(opts[:server]).filter(TABLES_FILTER)
-        ds.identifier_output_method = nil
-        ds.identifier_input_method = nil
-        ds2 = dataset
-        ds.map{|r| ds2.send(:output_identifier, r[:name])}
+        m = output_identifier_meth
+        metadata_dataset.from(:sqlite_master).server(opts[:server]).filter(TABLES_FILTER).map{|r| m.call(r[:name])}
       end
 
       # A symbol signifying the value of the temp_store PRAGMA.
@@ -141,6 +166,11 @@ module Sequel
         cols
       end
 
+      # Allow use without a generator, needed for the alter table hackery that Sequel allows.
+      def column_list_sql(generator)
+        generator.is_a?(Schema::Generator) ? super : generator.map{|c| column_definition_sql(c)}.join(', ')
+      end
+
       # The array of column schema hashes, except for the ones given in opts[:except]
       def defined_columns_for(table, opts={})
         cols = parse_pragma(table, {})
@@ -164,10 +194,7 @@ module Sequel
 
       # Parse the output of the table_info pragma
      def parse_pragma(table_name, opts)
-        ds2 = dataset
-        ds = self["PRAGMA table_info(?)", ds2.send(:input_identifier, table_name)]
-        ds.identifier_output_method = nil
-        ds.map do |row|
+        metadata_dataset.with_sql("PRAGMA table_info(?)", input_identifier_meth.call(table_name)).map do |row|
          row.delete(:cid)
          row[:allow_null] = row.delete(:notnull).to_i == 0
          row[:default] = row.delete(:dflt_value)
@@ -179,18 +206,26 @@ module Sequel
         end
       end
 
+      # SQLite treats integer primary keys as autoincrementing (alias of rowid).
+      def schema_autoincrementing_primary_key?(schema)
+        super and schema[:db_type].downcase == 'integer'
+      end
+
       # SQLite supports schema parsing using the table_info PRAGMA, so
       # parse the output of that into the format Sequel expects.
       def schema_parse_table(table_name, opts)
-        ds = dataset
+        m = output_identifier_meth
         parse_pragma(table_name, opts).map do |row|
-          [ds.send(:output_identifier, row.delete(:name)), row]
+          [m.call(row.delete(:name)), row]
         end
       end
 
-      # Override the standard type conversions with SQLite specific ones
-      def type_literal_base(column)
-        TYPES[column[:type]]
+      # SQLite uses the integer data type even for bignums. This is because they
+      # are both stored internally as text, and converted when returned from
+      # the database. Using an integer type instead of bigint makes it more likely
+      # that software will automatically return the column as an integer.
+      def type_literal_generic_bignum(column)
+        :integer
       end
     end
 
@@ -217,14 +252,8 @@ module Sequel
       # SQLite performs a TRUNCATE style DELETE if no filter is specified.
       # Since we want to always return the count of records, add a condition
       # that is always true and then delete.
-      def delete(opts = (defarg=true;{}))
-        # check if no filter is specified
-        if defarg
-          @opts[:where] ? super() : filter(1=>1).delete
-        else
-          opts = @opts.merge(opts)
-          super(opts[:where] ? opts : opts.merge(:where=>{1=>1}))
-        end
+      def delete
+        @opts[:where] ? super : filter(1=>1).delete
       end
 
       # Insert the values into the database.
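Reviewer note: Dataset#delete no longer takes an options hash (part of dropping the 2.x deprecated argument forms), and an unfiltered delete on SQLite still adds an always-true filter so the number of deleted rows is returned. Roughly, with a made-up table:

  DB[:albums].delete                    # DELETE FROM albums WHERE (1 = 1)  -> row count
  DB[:albums].filter(:id => 1).delete   # DELETE FROM albums WHERE (id = 1) -> row count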
@@ -256,6 +285,7 @@ module Sequel
 
       # SQLite uses string literals instead of identifiers in AS clauses.
       def as_sql(expression, aliaz)
+        aliaz = aliaz.value if aliaz.is_a?(SQL::Identifier)
         "#{expression} AS #{literal(aliaz.to_s)}"
       end
     end