sequel 3.30.0 → 3.31.0
- data/CHANGELOG +40 -0
- data/Rakefile +12 -2
- data/doc/association_basics.rdoc +28 -0
- data/doc/dataset_filtering.rdoc +8 -0
- data/doc/opening_databases.rdoc +1 -0
- data/doc/release_notes/3.31.0.txt +146 -0
- data/lib/sequel/adapters/jdbc.rb +7 -6
- data/lib/sequel/adapters/jdbc/derby.rb +5 -0
- data/lib/sequel/adapters/jdbc/h2.rb +6 -1
- data/lib/sequel/adapters/mock.rb +21 -2
- data/lib/sequel/adapters/shared/db2.rb +10 -0
- data/lib/sequel/adapters/shared/mssql.rb +40 -5
- data/lib/sequel/adapters/shared/mysql.rb +19 -2
- data/lib/sequel/adapters/shared/oracle.rb +13 -1
- data/lib/sequel/adapters/shared/postgres.rb +52 -8
- data/lib/sequel/adapters/shared/sqlite.rb +4 -3
- data/lib/sequel/adapters/utils/stored_procedures.rb +1 -11
- data/lib/sequel/database/schema_generator.rb +9 -2
- data/lib/sequel/dataset/actions.rb +37 -19
- data/lib/sequel/dataset/features.rb +10 -0
- data/lib/sequel/dataset/prepared_statements.rb +0 -10
- data/lib/sequel/dataset/query.rb +13 -1
- data/lib/sequel/dataset/sql.rb +6 -1
- data/lib/sequel/model/associations.rb +14 -4
- data/lib/sequel/model/base.rb +10 -0
- data/lib/sequel/plugins/serialization.rb +82 -43
- data/lib/sequel/version.rb +1 -1
- data/spec/adapters/mssql_spec.rb +46 -0
- data/spec/adapters/mysql_spec.rb +3 -0
- data/spec/adapters/postgres_spec.rb +61 -24
- data/spec/core/database_spec.rb +31 -18
- data/spec/core/dataset_spec.rb +90 -13
- data/spec/core/mock_adapter_spec.rb +37 -0
- data/spec/extensions/instance_filters_spec.rb +1 -0
- data/spec/extensions/nested_attributes_spec.rb +1 -1
- data/spec/extensions/serialization_spec.rb +49 -5
- data/spec/extensions/sharding_spec.rb +1 -1
- data/spec/integration/associations_test.rb +15 -0
- data/spec/integration/dataset_test.rb +71 -0
- data/spec/integration/prepared_statement_test.rb +8 -0
- data/spec/model/association_reflection_spec.rb +27 -0
- data/spec/model/associations_spec.rb +18 -3
- data/spec/model/base_spec.rb +20 -0
- data/spec/model/eager_loading_spec.rb +21 -0
- metadata +4 -2
data/CHANGELOG
CHANGED
@@ -1,3 +1,43 @@
+=== 3.31.0 (2012-01-03)
+
+* Dataset#from no longer handles :a__b__c___d as a.b.c AS d (jeremyevans)
+
+* Support many_to_one associations with the same name as their column, using the :key_column option (jeremyevans)
+
+* Add Model.def_column_alias for defining alias methods for columns (jeremyevans)
+
+* Support :server option in Dataset#import and #multi_insert (jeremyevans)
+
+* Respect existing RETURNING/OUTPUT clauses in #import/#multi_insert on PostgreSQL/MSSQL (jeremyevans)
+
+* Support :return=>:primary_key option to Dataset#import and #multi_insert (jeremyevans)
+
+* Correctly handle return value for Dataset#insert with column array and value array on PostgreSQL <8.2 (jeremyevans)
+
+* Dataset#insert_multiple now returns an array of inserted primary keys (jeremyevans) (#408)
+
+* Support RETURNING with DELETE and UPDATE on PostgreSQL 8.2+ (funny-falcon)
+
+* Raise error if tables from two separate schema are detected when parsing the schema for a single table on PostgreSQL (jeremyevans)
+
+* Handle clob types as string instead of blob on H2 (jeremyevans)
+
+* Add database type support to the mock adapter, e.g. mock://postgres (jeremyevans)
+
+* Allow creation of full text indexes on Microsoft SQL Server, but you need to provide a :key_index option (jeremyevans)
+
+* Allow Dataset#full_text_search usage with prepared statements (jeremyevans)
+
+* Make Dataset#exists use a PlaceholderLiteralString so it works with prepared statements (jeremyevans)
+
+* Fix Dataset#empty? for datasets with offsets when offset support is emulated (jeremyevans)
+
+* Add Dataset#group_rollup and #group_cube methods for GROUP BY ROLLUP and CUBE support (jeremyevans)
+
+* Add support for custom serialization formats to the serialization plugin (jeremyevans)
+
+* Support a :login_timeout option in the jdbc adapter (glebpom) (#406)
+
 === 3.30.0 (2011-12-01)
 
 * Handle usage of on_duplicate_key_update in MySQL prepared statements (jeremyevans) (#404)
data/Rakefile
CHANGED
@@ -117,15 +117,25 @@ begin
     end
   end
 
-  spec_with_cov = lambda do |name, files, d|
+  sc = spec_with_cov = lambda do |name, files, d|
     spec.call(name, files, d)
     t = spec.call("#{name}_cov", files, "#{d} with coverage")
     t.rcov = true
     t.rcov_opts = File.read("spec/rcov.opts").split("\n")
+    t
+  end
+
+  if RUBY_VERSION >= '1.8.7'
+    eval <<-END
+      spec_with_cov = lambda do |*x, &b|
+        t = sc.call(*x)
+        b.call(t) if b
+      end
+    END
   end
 
   task :default => [:spec]
-  spec_with_cov.call("spec", Dir["spec/{core,model}/*_spec.rb"], "Run core and model specs")
+  spec_with_cov.call("spec", Dir["spec/{core,model}/*_spec.rb"], "Run core and model specs"){|t| t.rcov_opts.concat(%w'--exclude "lib/sequel/adapters/([a-ln-z]|m[a-np-z])"')}
   spec.call("spec_core", Dir["spec/core/*_spec.rb"], "Run core specs")
   spec.call("spec_model", Dir["spec/model/*_spec.rb"], "Run model specs")
   spec_with_cov.call("spec_plugin", Dir["spec/extensions/*_spec.rb"], "Run extension/plugin specs")
data/doc/association_basics.rdoc
CHANGED
@@ -1483,6 +1483,34 @@ If the default modification methods would not do what you want, and you
 don't plan on overriding the internal modification methods to do what you
 want, it may be best to set this option to true.
 
+==== :key_column [+many_to_one+]
+
+This option exists to make it possible to create a +many_to_one+ association
+with the same name as the foreign key it uses.  Before this option was
+introduced, this was problematic, since the :key option was used to refer
+both to the model methods and the underlying database columns (which are
+usually the same).
+
+If you have a legacy database with a foreign key that is the same as the name
+of the association you would like to use, you should create a column alias
+for the foreign key and use this option:
+
+  # Example schema:
+  # albums          artists
+  #   :id      /--> :id
+  #   :artist --/   :name
+  #   :name
+
+  class Album < Sequel::Model
+    def_column_alias(:artist_id, :artist)
+    many_to_one :artist, :key_column=>:artist
+  end
+
+Note that if you have control over the database, it's better to just rename
+the foreign key column than use this option.  Similarly, if you can think
+of another association name that will work, it's wiser to rename the
+association than use this option.  Think of this option as a last resort.
+
 ==== :validate
 
 Set to false to not validate when implicitly saving any associated object.
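
For context, a minimal sketch of how the example above behaves at runtime (hypothetical data; the association method returns the related model object, while the aliased column method returns the raw key value):

  album = Album.first
  album.artist       # the associated Artist instance (association method)
  album.artist_id    # the value of the albums.artist column (column alias)

  # Dataset-level code still uses the real column name:
  Album.filter(:artist => 1).all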
data/doc/dataset_filtering.rdoc
CHANGED
@@ -59,6 +59,14 @@ Ranges (both inclusive and exclusive) can also be used:
   items.filter(:price => 100...200).sql
   #=> "SELECT * FROM items WHERE (price >= 100 AND price < 200)"
 
+== Filtering using an array
+
+If you need to select multiple items from a dataset, you can supply an array:
+
+  item_array = [1, 38, 47, 99]
+  items.filter(:id => item_array).sql
+  #=> "SELECT * FROM items WHERE (id IN (1, 38, 47, 99))"
+
 == Filtering using expressions
 
 Sequel allows you to use ruby expressions directly in the call to filter:
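
For context, a sketch following the example above: the inverse filter uses exclude, which produces a NOT IN clause:

  items.exclude(:id => item_array).sql
  #=> "SELECT * FROM items WHERE (id NOT IN (1, 38, 47, 99))"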
data/doc/opening_databases.rdoc
CHANGED
@@ -276,6 +276,7 @@ The following additional options are supported:
   Setting to false roughly doubles performance when selecting large numbers of rows.
   Note that you can't provide this option inside the connection string (as that is passed
   directly to JDBC), you have to pass it as a separate option.
+:login_timeout :: Set the login timeout on the JDBC connection (in seconds).
 
 === mysql
 
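
For context, a minimal sketch of passing the new option when connecting through the jdbc adapter (URL and credentials are placeholders):

  DB = Sequel.connect('jdbc:postgresql://localhost/mydb?user=me&password=secret',
                      :login_timeout => 30)  # seconds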
data/doc/release_notes/3.31.0.txt
ADDED
@@ -0,0 +1,146 @@
+= New Features
+
+* The serialization plugin can now support custom serialization
+  formats, by supplying a serializer/deserializer pair of
+  callable objects.  You can also register custom deserializers
+  via Sequel::Plugins::Serialization.register_format, so that
+  they can be referenced by name.  Example:
+
+    Sequel::Plugins::Serialization.register_format(:reverse,
+      lambda{|v| v.reverse},
+      lambda{|v| v.reverse})
+    class User < Sequel::Model
+      serialize_attributes :reverse, :password
+    end
+
+* Dataset#import and #multi_insert now support a
+  :return=>:primary_key option.  When this option is used, the
+  methods return an array of primary key values, one for each
+  inserted row.  Usage of this option on MySQL requires that a
+  separate query be issued per row (instead of the single
+  query for all rows that MySQL would usually use).
+
+* PostgreSQL can now use Dataset#returning in conjunction with
+  import/multi_insert to set a custom column to return.
+
+* Microsoft SQL Server can now use Dataset#output in conjunction with
+  import/multi_insert to set a custom column to return.
+
+* Dataset#import and #multi_insert now respect a :server option to
+  set the server/shard on which to execute the queries.
+  Additionally, options given to this method are also passed to
+  Dataset#transaction.
+
+* Dataset#insert_multiple now returns an array of inserted primary
+  keys.
+
+* Model.def_column_alias has been added to make it easy to create
+  alias methods for columns.  This is useful if you have a legacy
+  database and want to create friendly method names for the
+  underlying columns.  Note that this alias only affects the setter
+  and getter methods.  This does not affect the dataset level, so you
+  still need to use the actual column names in dataset filters.
+
+* many_to_one associations can now have the same name as the related
+  foreign key column, using the :key_column option.  Use of this
+  feature is not recommended, as it is much better to either rename
+  the column or rename the association.  Here's an example of usage:
+
+    # Example schema:
+    # albums          artists
+    #   :id      /--> :id
+    #   :artist --/   :name
+    #   :name
+    class Album < Sequel::Model
+      def_column_alias(:artist_id, :artist)
+      many_to_one :artist, :key_column=>:artist
+    end
+
+* The mock adapter can now mock out database types, by providing a
+  shared adapter name as the host (e.g. mock://postgres).  This
+  emulation is not perfect, but in most cases it allows you to see
+  what SQL Sequel would generate on a given database without needing
+  to install the required database driver.
+
+* Sequel now supports creating full text indexes on Microsoft SQL
+  Server.  Before using it, you must have previously setup a default
+  full text search catalog, and you need to provide a :key_index
+  option with an index name symbol.
+
+* Dataset#group_rollup and #group_cube methods have been added for
+  GROUP BY ROLLUP and GROUP BY CUBE support.  These features are in
+  a recent SQL standard, and they are supported to various degrees on
+  Microsoft SQL Server, DB2, Oracle, MySQL, and Derby.
+
+* Dataset#full_text_search on Microsoft SQL Server now supports
+  multiple search terms.
+
+* The jdbc adapter now supports a :login_timeout option, giving the
+  timeout in seconds.
+
+= Other Improvements
+
+* Dataset#exists can now be used with prepared statement
+  placeholders.
+
+* Dataset#full_text_search can now be used with prepared statement
+  placeholders on PostgreSQL, MySQL, and Microsoft SQL Server.
+
+* If tables from two separate schema are detected when parsing the
+  schema for a table on PostgreSQL, an error is now raised.
+  Previously, no error was raised, which led to weird errors later,
+  such as duplicate columns in a model's primary_key.
+
+* RETURNING is now supported with UPDATE/DELETE on PostgreSQL 8.2+.
+  Previously, Sequel only supported it on 9.1+, but PostgreSQL
+  introduced support for it in 8.2.
+
+* The shared postgres adapter now correctly handles the return value
+  for Dataset#insert if you provide a separate column array and value
+  array on PostgreSQL < 8.2.
+
+* Handle case in the PostgreSQL adapter where the server version
+  cannot be determined via a query.
+
+* H2 clob types are now treated as string instead of as blob.
+  Treating clob as blob breaks on H2, as it doesn't automatically
+  hex-unescape the input for clobs as it does for blobs.
+
+* Dataset#empty? now works correctly when the dataset has an offset
+  and offset support is being emulated.
+
+* The mock adapter no longer defaults to downcasing identifiers on
+  output.
+
+= Backwards Compatibility
+
+* Dataset#exists now returns a PlaceholderLiteralString instead of a
+  LiteralString, which could potentially break some code.  If you
+  would like a String returned, you can pass the returned object to
+  Dataset#literal:
+
+    dataset.literal(dataset.exists)
+
+* Dataset#from no longer handles :a__b__c___d as "a.b.c AS d".  This
+  was not the intended behavior, and nowhere else in Sequel is a
+  symbol treated that way.  Now, Dataset#from is consistent with the
+  rest of Sequel, using "a.b__c AS d".  This should only affect
+  people in very rare cases, as most databases don't use three level
+  qualified tables.  One exception is Microsoft SQL Server, which can
+  use three level qualified tables for cross-database access.
+
+* Previously, Dataset#insert_multiple returned an array of hashes, now
+  it returns an array of primary key values.
+
+* Dataset#EXRACT_CLOSE in the shared sqlite adapter has been renamed to
+  Dataset#EXTRACT_CLOSE.
+
+* Dataset::StoredProcedureMethods::SQL_QUERY_TYPE and
+  Dataset::ArgumentMapper::SQL_QUERY_TYPE constants have been removed,
+  as have related sql_query_type private methods.
+
+* The serialization plugin was significantly refactored.
+  Model.serialization_map now contains a callable object instead of a
+  Symbol, and Model.serialization_format has been removed.
+  Model.define_serialized_attribute_accessors private method now takes
+  two callable objects before the columns, instead of a single symbol.
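
For context, a minimal sketch of the registered :reverse format from the notes above in use (the model and column come from the example; the data is hypothetical):

  user = User.new(:password => 'secret')
  user.password   # => "secret"   (the accessor returns the deserialized value)
  user.save       # the serialized (reversed) form is what gets written to the column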
data/lib/sequel/adapters/jdbc.rb
CHANGED
@@ -200,6 +200,7 @@ module Sequel
         args = [uri(opts)]
         args.concat([opts[:user], opts[:password]]) if opts[:user] && opts[:password]
         begin
+          JavaSQL::DriverManager.setLoginTimeout(opts[:login_timeout]) if opts[:login_timeout]
           JavaSQL::DriverManager.getConnection(*args)
         rescue => e
           raise e unless driver
@@ -546,17 +547,17 @@ module Sequel
       # Execute the prepared SQL using the stored type and
       # arguments derived from the hash passed to call.
       def execute(sql, opts={}, &block)
-        super(self, {:arguments=>bind_arguments
+        super(self, {:arguments=>bind_arguments}.merge(opts), &block)
       end
 
       # Same as execute, explicit due to intricacies of alias and super.
       def execute_dui(sql, opts={}, &block)
-        super(self, {:arguments=>bind_arguments
+        super(self, {:arguments=>bind_arguments}.merge(opts), &block)
       end
 
       # Same as execute, explicit due to intricacies of alias and super.
       def execute_insert(sql, opts={}, &block)
-        super(self, {:arguments=>bind_arguments, :type
+        super(self, {:arguments=>bind_arguments, :type=>:insert}.merge(opts), &block)
       end
     end
 
@@ -569,17 +570,17 @@ module Sequel
 
       # Execute the database stored procedure with the stored arguments.
       def execute(sql, opts={}, &block)
-        super(@sproc_name, {:args=>@sproc_args, :sproc=>true
+        super(@sproc_name, {:args=>@sproc_args, :sproc=>true}.merge(opts), &block)
       end
 
       # Same as execute, explicit due to intricacies of alias and super.
       def execute_dui(sql, opts={}, &block)
-        super(@sproc_name, {:args=>@sproc_args, :sproc=>true
+        super(@sproc_name, {:args=>@sproc_args, :sproc=>true}.merge(opts), &block)
       end
 
       # Same as execute, explicit due to intricacies of alias and super.
       def execute_insert(sql, opts={}, &block)
-        super(@sproc_name, {:args=>@sproc_args, :sproc=>true, :type
+        super(@sproc_name, {:args=>@sproc_args, :sproc=>true, :type=>:insert}.merge(opts), &block)
       end
     end
 
data/lib/sequel/adapters/jdbc/h2.rb
CHANGED
@@ -100,6 +100,11 @@ module Sequel
       def primary_key_index_re
         PRIMARY_KEY_INDEX_RE
       end
+
+      # Treat clob as string instead of blob
+      def schema_column_type(db_type)
+        db_type == 'clob' ? :string : super
+      end
     end
 
     # Dataset class for H2 datasets accessed via JDBC.
@@ -158,7 +163,7 @@ module Sequel
     #JAVA_H2_CLOB = Java::OrgH2Jdbc::JdbcClob
 
     class ::Sequel::JDBC::Dataset::TYPE_TRANSLATOR
-      def h2_clob(v)
+      def h2_clob(v) v.getSubString(1, v.length) end
     end
 
     H2_CLOB_METHOD = TYPE_TRANSLATOR_INSTANCE.method(:h2_clob)
data/lib/sequel/adapters/mock.rb
CHANGED
@@ -30,6 +30,20 @@ module Sequel
     class Database < Sequel::Database
       set_adapter_scheme :mock
 
+      # Map of database type names to module names, used for handling
+      # mock adapters for specific database types.
+      SHARED_ADAPTERS = {
+        'access'=>'Access',
+        'db2'=>'DB2',
+        'firebird'=>'Firebird',
+        'informix'=>'Informix',
+        'mssql'=>'MSSQL',
+        'mysql'=>'MySQL',
+        'oracle'=>'Oracle',
+        'postgres'=>'Postgres',
+        'sqlite'=>'SQLite'
+      }
+
       # Set the autogenerated primary key integer
       # to be returned when running an insert query.
       # Argument types supported:
@@ -63,7 +77,7 @@ module Sequel
       # Hash :: Always yield a single row with this hash
       # Array of Hashes :: Yield separately for each hash in this array
       # Array (otherwise) :: First retrieval gets the first value
-      #
+      #                      in the array, second gets the second value, etc.
       # Proc :: Called with the select SQL query, uses
       #         the value returned, which should be a hash or
       #         array of hashes.
@@ -95,6 +109,11 @@ module Sequel
       def initialize(opts={})
         super
         opts = @opts
+        if mod_name = SHARED_ADAPTERS[opts[:host]]
+          require "sequel/adapters/shared/#{opts[:host]}"
+          extend Sequel.const_get(mod_name)::DatabaseMethods
+          extend_datasets Sequel.const_get(mod_name)::DatasetMethods
+        end
         self.autoid = opts[:autoid]
         self.columns = opts[:columns]
         self.fetch = opts[:fetch]
@@ -265,7 +284,7 @@ module Sequel
         nil
       end
 
-      def
+      def identifier_output_method_default
         nil
       end
     end
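
For context, a minimal sketch of the shared-adapter emulation wired up above (output depends on the emulated database):

  DB = Sequel.connect('mock://postgres')
  puts DB[:items].where(:id => 1).sql        # PostgreSQL-flavored SELECT
  puts DB[:items].insert_sql(:name => 'abc') # PostgreSQL-flavored INSERT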
data/lib/sequel/adapters/shared/db2.rb
CHANGED
@@ -226,6 +226,16 @@ module Sequel
       end
     end
 
+    # DB2 supports GROUP BY CUBE
+    def supports_group_cube?
+      true
+    end
+
+    # DB2 supports GROUP BY ROLLUP
+    def supports_group_rollup?
+      true
+    end
+
     # DB2 does not support IS TRUE.
     def supports_is_true?
       false
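
For context, a rough sketch of the dataset methods these predicates enable, assuming group_rollup/group_cube act on the dataset's existing GROUP BY columns (exact SQL varies by database; MySQL, for instance, uses the WITH ROLLUP form):

  ds = DB[:orders].group(:region, :product).group_rollup
  ds.sql   # e.g. SELECT * FROM orders GROUP BY ROLLUP (region, product)

  DB[:orders].group(:region, :product).group_cube   # GROUP BY CUBE where supported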
data/lib/sequel/adapters/shared/mssql.rb
CHANGED
@@ -234,11 +234,11 @@ module Sequel
       # support for clustered index type
       def index_definition_sql(table_name, index)
         index_name = index[:name] || default_index_name(table_name, index[:columns])
-
-        if index[:
-
+        raise Error, "Partial indexes are not supported for this database" if index[:where]
+        if index[:type] == :full_text
+          "CREATE FULLTEXT INDEX ON #{quote_schema_table(table_name)} #{literal(index[:columns])} KEY INDEX #{literal(index[:key_index])}"
         else
-          "CREATE #{'UNIQUE ' if index[:unique]}#{'CLUSTERED ' if clustered}INDEX #{quote_identifier(index_name)} ON #{quote_schema_table(table_name)} #{literal(index[:columns])}"
+          "CREATE #{'UNIQUE ' if index[:unique]}#{'CLUSTERED ' if index[:type] == :clustered}INDEX #{quote_identifier(index_name)} ON #{quote_schema_table(table_name)} #{literal(index[:columns])}"
         end
       end
     end
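
For context, a rough sketch of requesting such an index through the schema generator (table, column, and the :key_index name are hypothetical; a default full text catalog must already exist):

  DB.create_table(:posts) do
    primary_key :id
    String :title
    full_text_index :title, :key_index => :posts_pk_index  # name of the table's unique key index
  end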
@@ -345,7 +345,8 @@ module Sequel
 
       # MSSQL uses the CONTAINS keyword for full text search
       def full_text_search(cols, terms, opts = {})
-
+        terms = "\"#{terms.join('" OR "')}\"" if terms.is_a?(Array)
+        filter("CONTAINS (?, ?)", cols, terms)
       end
 
       # Use the OUTPUT clause to get the value of all columns for the newly inserted record.
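
For context, a sketch of the multi-term call handled above (generated SQL is approximate):

  DB[:posts].full_text_search(:title, ['ruby', 'sequel'])
  # WHERE CONTAINS (title, '"ruby" OR "sequel"')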
@@ -418,6 +419,16 @@ module Sequel
         db.server_version(@opts[:server])
       end
 
+      # MSSQL 2005+ supports GROUP BY CUBE.
+      def supports_group_cube?
+        is_2005_or_later?
+      end
+
+      # MSSQL 2005+ supports GROUP BY ROLLUP
+      def supports_group_rollup?
+        is_2005_or_later?
+      end
+
       # MSSQL supports insert_select via the OUTPUT clause.
       def supports_insert_select?
         supports_output_clause? && !opts[:disable_insert_output]
@@ -464,6 +475,24 @@ module Sequel
       end
 
       protected
+
+      # If returned primary keys are requested, use OUTPUT unless already set on the
+      # dataset.  If OUTPUT is already set, use existing returning values.  If OUTPUT
+      # is only set to return a single column, return an array of just that column.
+      # Otherwise, return an array of hashes.
+      def _import(columns, values, opts={})
+        if opts[:return] == :primary_key && !@opts[:output]
+          output(nil, [SQL::QualifiedIdentifier.new(:inserted, first_primary_key)])._import(columns, values, opts)
+        elsif @opts[:output]
+          statements = multi_insert_sql(columns, values)
+          @db.transaction(opts.merge(:server=>@opts[:server])) do
+            statements.map{|st| with_sql(st)}
+          end.first.map{|v| v.length == 1 ? v.values.first : v}
+        else
+          super
+        end
+      end
+
       # MSSQL does not allow ordering in sub-clauses unless 'top' (limit) is specified
       def aggregate_dataset
         (options_overlap(Sequel::Dataset::COUNT_FROM_SELF_OPTS) && !options_overlap([:limit])) ? unordered.from_self : super
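
For context, a sketch of the dataset-level options this implements (table and values are hypothetical):

  DB[:albums].import([:name], [['RF'], ['MO']], :return => :primary_key)
  # => e.g. [1, 2]   (one primary key per inserted row)

  DB[:albums].multi_insert([{:name => 'RF'}, {:name => 'MO'}], :server => :shard1)
  # runs the inserts on the :shard1 shard, inside a transaction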
@@ -496,6 +525,12 @@ module Sequel
       end
       alias update_from_sql delete_from2_sql
 
+      # Return the first primary key for the current table.  If this table has
+      # multiple primary keys, this will only return one of them.  Used by #_import.
+      def first_primary_key
+        @db.schema(self).map{|k, v| k if v[:primary_key] == true}.compact.first
+      end
+
       # MSSQL raises an error if you try to provide more than 3 decimal places
       # for a fractional timestamp. This probably doesn't work for smalldatetime
       # fields.