sequel 0.1.7 → 0.1.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/CHANGELOG +50 -28
- data/README +1 -1
- data/Rakefile +13 -3
- data/lib/sequel/database.rb +2 -2
- data/lib/sequel/dataset.rb +180 -674
- data/lib/sequel/dataset/dataset_convenience.rb +132 -0
- data/lib/sequel/dataset/dataset_sql.rb +564 -0
- data/lib/sequel/dbi.rb +5 -4
- data/lib/sequel/model.rb +2 -2
- data/lib/sequel/mysql.rb +5 -48
- data/lib/sequel/odbc.rb +7 -12
- data/lib/sequel/postgres.rb +22 -73
- data/lib/sequel/sqlite.rb +54 -15
- data/spec/adapters/sqlite_spec.rb +104 -0
- data/spec/connection_pool_spec.rb +270 -0
- data/spec/core_ext_spec.rb +127 -0
- data/spec/database_spec.rb +366 -0
- data/spec/dataset_spec.rb +1449 -0
- data/spec/expressions_spec.rb +151 -0
- metadata +12 -2
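
The headline change in this release is a refactoring of data/lib/sequel/dataset.rb: much of its code moves into the two new files data/lib/sequel/dataset/dataset_convenience.rb and data/lib/sequel/dataset/dataset_sql.rb, whose full contents appear in the hunks below. The dataset.rb hunk itself is not included in this section, so the following is only a hypothetical sketch of how such a split is typically wired back together; the require paths and include calls are assumptions, not lines from this diff:

    # Hypothetical wiring, not taken from this diff.
    require 'sequel/dataset/dataset_sql'
    require 'sequel/dataset/dataset_convenience'

    module Sequel
      class Dataset
        include SQL          # select_sql, insert_sql, update_sql, delete_sql, filter, ...
        include Convenience  # first, last, paginate, min/max/sum/avg, print
      end
    end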
--- /dev/null
+++ data/lib/sequel/dataset/dataset_convenience.rb
@@ -0,0 +1,132 @@
+module Sequel
+  class Dataset
+    module Convenience
+      # Returns the first record in the dataset.
+      def single_record(opts = nil)
+        each(opts) {|r| return r}
+        nil
+      end
+
+      NAKED_HASH = {:naked => true}.freeze
+
+      # Returns the first value of the first reecord in the dataset.
+      def single_value(opts = nil)
+        opts = opts ? NAKED_HASH.merge(opts) : NAKED_HASH
+        each(opts) {|r| return r.values.first}
+      end
+
+      # Returns the first record in the dataset. If the num argument is specified,
+      # an array is returned with the first <i>num</i> records.
+      def first(*args)
+        args = args.empty? ? 1 : (args.size == 1) ? args.first : args
+        case args
+        when 1: single_record(:limit => 1)
+        when Fixnum: limit(args).all
+        else
+          filter(args).single_record(:limit => 1)
+        end
+      end
+
+      # Returns the first record matching the condition.
+      def [](*conditions)
+        first(*conditions)
+      end
+
+      def []=(conditions, values)
+        filter(conditions).update(values)
+      end
+
+      # Returns the last records in the dataset by inverting the order. If no
+      # order is given, an exception is raised. If num is not given, the last
+      # record is returned. Otherwise an array is returned with the last
+      # <i>num</i> records.
+      def last(*args)
+        raise SequelError, 'No order specified' unless
+          @opts[:order] || (opts && opts[:order])
+
+        args = args.empty? ? 1 : (args.size == 1) ? args.first : args
+
+        case args
+        when Fixnum:
+          l = {:limit => args}
+          opts = {:order => invert_order(@opts[:order])}. \
+            merge(opts ? opts.merge(l) : l)
+          if args == 1
+            single_record(opts)
+          else
+            clone_merge(opts).all
+          end
+        else
+          filter(args).last(1)
+        end
+      end
+
+      # Maps field values for each record in the dataset (if a field name is
+      # given), or performs the stock mapping functionality of Enumerable.
+      def map(field_name = nil, &block)
+        if field_name
+          super() {|r| r[field_name]}
+        else
+          super(&block)
+        end
+      end
+
+      # Returns a hash with one column used as key and another used as value.
+      def to_hash(key_column, value_column)
+        inject({}) do |m, r|
+          m[r[key_column]] = r[value_column]
+          m
+        end
+      end
+
+      # returns a paginated dataset. The resulting dataset also provides the
+      # total number of pages (Dataset#page_count) and the current page number
+      # (Dataset#current_page), as well as Dataset#prev_page and Dataset#next_page
+      # for implementing pagination controls.
+      def paginate(page_no, page_size)
+        total_pages = (count / page_size.to_f).ceil
+        paginated = limit(page_size, (page_no - 1) * page_size)
+        paginated.current_page = page_no
+        paginated.page_count = total_pages
+        paginated
+      end
+
+      attr_accessor :page_count, :current_page
+
+      # Returns the previous page number or nil if the current page is the first
+      def prev_page
+        current_page > 1 ? (current_page - 1) : nil
+      end
+
+      # Returns the next page number or nil if the current page is the last page
+      def next_page
+        current_page < page_count ? (current_page + 1) : nil
+      end
+
+      # Returns the minimum value for the given field.
+      def min(field)
+        single_value(:select => [field.MIN])
+      end
+
+      # Returns the maximum value for the given field.
+      def max(field)
+        single_value(:select => [field.MAX])
+      end
+
+      # Returns the sum for the given field.
+      def sum(field)
+        single_value(:select => [field.SUM])
+      end
+
+      # Returns the average value for the given field.
+      def avg(field)
+        single_value(:select => [field.AVG])
+      end
+
+      # Pretty prints the records in the dataset as plain-text table.
+      def print(*cols)
+        Sequel::PrettyTable.print(naked.all, cols.empty? ? columns : cols)
+      end
+    end
+  end
+end
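
The Convenience module above mostly wraps the dataset's option-based query interface in friendlier calls. A minimal usage sketch, assuming a connected database with an items table (DB and the data are hypothetical, not part of this diff; min/max/sum/avg also assume the symbol extensions such as :price.MIN from core_ext):

    items = DB[:items]
    items.first                       # first record
    items.order(:price).last(3)       # array of the last three records (an order is required)
    items.to_hash(:id, :name)         # {id_value => name_value, ...}
    items.min(:price)                 # single aggregate value via single_value
    items.paginate(2, 25).prev_page   # => 1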
--- /dev/null
+++ data/lib/sequel/dataset/dataset_sql.rb
@@ -0,0 +1,564 @@
+module Sequel
+  class Dataset
+    # The Dataset SQL module implements all the dataset methods concerned with
+    # generating SQL statements for retrieving and manipulating records.
+    module SQL
+      # Returns a valid SQL fieldname as a string. Field names specified as
+      # symbols can include double underscores to denote a dot separator, e.g.
+      # :posts__id will be converted into posts.id.
+      def field_name(field)
+        field.is_a?(Symbol) ? field.to_field_name : field
+      end
+
+      QUALIFIED_REGEXP = /(.*)\.(.*)/.freeze
+
+      # Returns a qualified field name (including a table name) if the field
+      # name isn't already qualified.
+      def qualified_field_name(field, table)
+        fn = field_name(field)
+        fn =~ QUALIFIED_REGEXP ? fn : "#{table}.#{fn}"
+      end
+
+      WILDCARD = '*'.freeze
+      COMMA_SEPARATOR = ", ".freeze
+
+      # Converts an array of field names into a comma seperated string of
+      # field names. If the array is empty, a wildcard (*) is returned.
+      def field_list(fields)
+        if fields.empty?
+          WILDCARD
+        else
+          fields.map {|i| field_name(i)}.join(COMMA_SEPARATOR)
+        end
+      end
+
+      # Converts an array of sources names into into a comma separated list.
+      def source_list(source)
+        if source.nil? || source.empty?
+          raise SequelError, 'No source specified for query'
+        end
+        source.map {|i| i.is_a?(Dataset) ? i.to_table_reference : i}.
+          join(COMMA_SEPARATOR)
+      end
+
+      NULL = "NULL".freeze
+      TIMESTAMP_FORMAT = "TIMESTAMP '%Y-%m-%d %H:%M:%S'".freeze
+      DATE_FORMAT = "DATE '%Y-%m-%d'".freeze
+
+      # Returns a literal representation of a value to be used as part
+      # of an SQL expression. The stock implementation supports literalization
+      # of String (with proper escaping to prevent SQL injections), numbers,
+      # Symbol (as field references), Array (as a list of literalized values),
+      # Time (as an SQL TIMESTAMP), Date (as an SQL DATE), Dataset (as a
+      # subquery) and nil (AS NULL).
+      #
+      # dataset.literal("abc'def") #=> "'abc''def'"
+      # dataset.literal(:items__id) #=> "items.id"
+      # dataset.literal([1, 2, 3]) => "(1, 2, 3)"
+      # dataset.literal(DB[:items]) => "(SELECT * FROM items)"
+      #
+      # If an unsupported object is given, an exception is raised.
+      def literal(v)
+        case v
+        when ExpressionString: v
+        when String: "'#{v.gsub(/'/, "''")}'"
+        when Integer, Float: v.to_s
+        when NilClass: NULL
+        when Symbol: v.to_field_name
+        when Array: v.empty? ? NULL : v.map {|i| literal(i)}.join(COMMA_SEPARATOR)
+        when Time: v.strftime(TIMESTAMP_FORMAT)
+        when Date: v.strftime(DATE_FORMAT)
+        when Dataset: "(#{v.sql})"
+        else
+          raise SequelError, "can't express #{v.inspect} as a SQL literal"
+        end
+      end
+
+      AND_SEPARATOR = " AND ".freeze
+
+      # Formats an equality expression involving a left value and a right value.
+      # Equality expressions differ according to the class of the right value.
+      # The stock implementation supports Range (inclusive and exclusive), Array
+      # (as a list of values to compare against), Dataset (as a subquery to
+      # compare against), or a regular value.
+      #
+      # dataset.format_eq_expression('id', 1..20) #=>
+      # "(id >= 1 AND id <= 20)"
+      # dataset.format_eq_expression('id', [3,6,10]) #=>
+      # "(id IN (3, 6, 10))"
+      # dataset.format_eq_expression('id', DB[:items].select(:id)) #=>
+      # "(id IN (SELECT id FROM items))"
+      # dataset.format_eq_expression('id', nil) #=>
+      # "(id IS NULL)"
+      # dataset.format_eq_expression('id', 3) #=>
+      # "(id = 3)"
+      def format_eq_expression(left, right)
+        case right
+        when Range:
+          right.exclude_end? ? \
+            "(#{left} >= #{right.begin} AND #{left} < #{right.end})" : \
+            "(#{left} >= #{right.begin} AND #{left} <= #{right.end})"
+        when Array:
+          "(#{left} IN (#{literal(right)}))"
+        when Dataset:
+          "(#{left} IN (#{right.sql}))"
+        when NilClass:
+          "(#{left} IS NULL)"
+        else
+          "(#{left} = #{literal(right)})"
+        end
+      end
+
+      # Formats an expression comprising a left value, a binary operator and a
+      # right value. The supported operators are :eql (=), :not (!=), :lt (<),
+      # :lte (<=), :gt (>), :gte (>=) and :like (LIKE operator). Examples:
+      #
+      # dataset.format_expression('price', :gte, 100) #=> "(price >= 100)"
+      # dataset.format_expression('id', :not, 30) #=> "NOT (id = 30)"
+      # dataset.format_expression('name', :like, 'abc%') #=>
+      # "(name LIKE 'abc%')"
+      #
+      # If an unsupported operator is given, an exception is raised.
+      def format_expression(left, op, right)
+        left = field_name(left)
+        case op
+        when :eql:
+          format_eq_expression(left, right)
+        when :not:
+          "NOT #{format_eq_expression(left, right)}"
+        when :lt:
+          "(#{left} < #{literal(right)})"
+        when :lte:
+          "(#{left} <= #{literal(right)})"
+        when :gt:
+          "(#{left} > #{literal(right)})"
+        when :gte:
+          "(#{left} >= #{literal(right)})"
+        when :like:
+          "(#{left} LIKE #{literal(right)})"
+        else
+          raise SequelError, "Invalid operator specified: #{op}"
+        end
+      end
+
+      QUESTION_MARK = '?'.freeze
+
+      # Formats a where clause. If parenthesize is true, then the whole
+      # generated clause will be enclosed in a set of parentheses.
+      def expression_list(where, parenthesize = false)
+        case where
+        when Hash:
+          parenthesize = false if where.size == 1
+          fmt = where.map {|i| format_expression(i[0], :eql, i[1])}.
+            join(AND_SEPARATOR)
+        when Array:
+          fmt = where.shift.gsub(QUESTION_MARK) {literal(where.shift)}
+        when Proc:
+          fmt = where.to_expressions.map {|e| format_expression(e.left, e.op, e.right)}.
+            join(AND_SEPARATOR)
+        else
+          # if the expression is compound, it should be parenthesized in order for
+          # things to be predictable (when using #or and #and.)
+          parenthesize |= where =~ /\).+\(/
+          fmt = where
+        end
+        parenthesize ? "(#{fmt})" : fmt
+      end
+
+      # Returns a copy of the dataset with the source changed.
+      def from(*source)
+        clone_merge(:from => source)
+      end
+
+      # Returns a copy of the dataset with the selected fields changed.
+      def select(*fields)
+        clone_merge(:select => fields)
+      end
+
+      # Returns a copy of the dataset with the distinct option.
+      def uniq
+        clone_merge(:distinct => true)
+      end
+      alias distinct uniq
+
+      # Returns a copy of the dataset with the order changed.
+      def order(*order)
+        clone_merge(:order => order)
+      end
+
+      # Returns a copy of the dataset with the order reversed. If no order is
+      # given, the existing order is inverted.
+      def reverse_order(*order)
+        order(invert_order(order.empty? ? @opts[:order] : order))
+      end
+
+      DESC_ORDER_REGEXP = /(.*)\sDESC/.freeze
+
+      # Inverts the given order by breaking it into a list of field references
+      # and inverting them.
+      #
+      # dataset.invert_order('id DESC') #=> "id"
+      # dataset.invert_order('category, price DESC') #=>
+      # "category DESC, price"
+      def invert_order(order)
+        new_order = []
+        order.each do |f|
+          f.to_s.split(',').map do |p|
+            p.strip!
+            new_order << (p =~ DESC_ORDER_REGEXP ? $1 : p.to_sym.DESC)
+          end
+        end
+        new_order
+      end
+
+      # Returns a copy of the dataset with the results grouped by the value of
+      # the given fields
+      def group(*fields)
+        clone_merge(:group => fields)
+      end
+
+      # Returns a copy of the dataset with the given conditions imposed upon it.
+      # If the query has been grouped, then the conditions are imposed in the
+      # HAVING clause. If not, then they are imposed in the WHERE clause. Filter
+      # accepts a Hash (formated into a list of equality expressions), an Array
+      # (formatted ala ActiveRecord conditions), a String (taken literally), or
+      # a block that is converted into expressions.
+      #
+      # dataset.filter(:id => 3).sql #=>
+      # "SELECT * FROM items WHERE (id = 3)"
+      # dataset.filter('price < ?', 100).sql #=>
+      # "SELECT * FROM items WHERE price < 100"
+      # dataset.filter('price < 100').sql #=>
+      # "SELECT * FROM items WHERE price < 100"
+      # dataset.filter {price < 100}.sql #=>
+      # "SELECT * FROM items WHERE (price < 100)"
+      #
+      # Multiple filter calls can be chained for scoping:
+      #
+      # software = dataset.filter(:category => 'software')
+      # software.filter {price < 100}.sql #=>
+      # "SELECT * FROM items WHERE (category = 'software') AND (price < 100)"
+      def filter(*cond, &block)
+        clause = (@opts[:group] ? :having : :where)
+        cond = cond.first if cond.size == 1
+        parenthesize = !(cond.is_a?(Hash) || cond.is_a?(Array))
+        filter = cond.is_a?(Hash) && cond
+        if @opts[clause]
+          if filter && cond.is_a?(Hash)
+            filter
+          end
+          filter =
+          l = expression_list(@opts[clause])
+          r = expression_list(block || cond, parenthesize)
+          clone_merge(clause => "#{l} AND #{r}")
+        else
+          clone_merge(:filter => cond, clause => expression_list(block || cond))
+        end
+      end
+
+      # Adds an alternate filter to an existing filter using OR. If no filter
+      # exists an error is raised.
+      def or(*cond, &block)
+        clause = (@opts[:group] ? :having : :where)
+        cond = cond.first if cond.size == 1
+        parenthesize = !(cond.is_a?(Hash) || cond.is_a?(Array))
+        if @opts[clause]
+          l = expression_list(@opts[clause])
+          r = expression_list(block || cond, parenthesize)
+          clone_merge(clause => "#{l} OR #{r}")
+        else
+          raise SequelError, "No existing filter found."
+        end
+      end
+
+      # Adds an further filter to an existing filter using AND. If no filter
+      # exists an error is raised. This method is identical to #filter except
+      # it expects an existing filter.
+      def and(*cond, &block)
+        clause = (@opts[:group] ? :having : :where)
+        unless @opts[clause]
+          raise SequelError, "No existing filter found."
+        end
+        filter(*cond, &block)
+      end
+
+      # Performs the inverse of Dataset#filter.
+      #
+      # dataset.exclude(:category => 'software').sql #=>
+      # "SELECT * FROM items WHERE NOT (category = 'software')"
+      def exclude(*cond, &block)
+        clause = (@opts[:group] ? :having : :where)
+        cond = cond.first if cond.size == 1
+        parenthesize = !(cond.is_a?(Hash) || cond.is_a?(Array))
+        if @opts[clause]
+          l = expression_list(@opts[clause])
+          r = expression_list(block || cond, parenthesize)
+          cond = "#{l} AND NOT #{r}"
+        else
+          cond = "NOT #{expression_list(block || cond, true)}"
+        end
+        clone_merge(clause => cond)
+      end
+
+      # Returns a copy of the dataset with the where conditions changed. Raises
+      # if the dataset has been grouped. See also #filter.
+      def where(*cond, &block)
+        if @opts[:group]
+          raise SequelError, "Can't specify a WHERE clause once the dataset has been grouped"
+        else
+          filter(*cond, &block)
+        end
+      end
+
+      # Returns a copy of the dataset with the having conditions changed. Raises
+      # if the dataset has not been grouped. See also #filter
+      def having(*cond, &block)
+        unless @opts[:group]
+          raise SequelError, "Can only specify a HAVING clause on a grouped dataset"
+        else
+          filter(*cond, &block)
+        end
+      end
+
+      # Adds a UNION clause using a second dataset object. If all is true the
+      # clause used is UNION ALL, which may return duplicate rows.
+      def union(dataset, all = false)
+        clone_merge(:union => dataset, :union_all => all)
+      end
+
+      # Adds an INTERSECT clause using a second dataset object. If all is true
+      # the clause used is INTERSECT ALL, which may return duplicate rows.
+      def intersect(dataset, all = false)
+        clone_merge(:intersect => dataset, :intersect_all => all)
+      end
+
+      # Adds an EXCEPT clause using a second dataset object. If all is true the
+      # clause used is EXCEPT ALL, which may return duplicate rows.
+      def except(dataset, all = false)
+        clone_merge(:except => dataset, :except_all => all)
+      end
+
+      JOIN_TYPES = {
+        :left_outer => 'LEFT OUTER JOIN'.freeze,
+        :right_outer => 'RIGHT OUTER JOIN'.freeze,
+        :full_outer => 'FULL OUTER JOIN'.freeze,
+        :inner => 'INNER JOIN'.freeze
+      }
+
+      # Returns a join clause based on the specified join type and condition.
+      def join_expr(type, table, expr)
+        join_type = JOIN_TYPES[type || :inner]
+        unless join_type
+          raise SequelError, "Invalid join type: #{type}"
+        end
+
+        join_expr = expr.map do |k, v|
+          l = qualified_field_name(k, table)
+          r = qualified_field_name(v, @opts[:last_joined_table] || @opts[:from])
+          "(#{l} = #{r})"
+        end.join(AND_SEPARATOR)
+
+        " #{join_type} #{table} ON #{join_expr}"
+      end
+
+      # Returns a joined dataset with the specified join type and condition.
+      def join_table(type, table, expr)
+        unless expr.is_a?(Hash)
+          expr = {expr => :id}
+        end
+        clause = join_expr(type, table, expr)
+        join = @opts[:join] ? @opts[:join] + clause : clause
+        clone_merge(:join => join, :last_joined_table => table)
+      end
+
+      # Returns a LEFT OUTER joined dataset.
+      def left_outer_join(table, expr); join_table(:left_outer, table, expr); end
+
+      # Returns a RIGHT OUTER joined dataset.
+      def right_outer_join(table, expr); join_table(:right_outer, table, expr); end
+
+      # Returns an OUTER joined dataset.
+      def full_outer_join(table, expr); join_table(:full_outer, table, expr); end
+
+      # Returns an INNER joined dataset.
+      def inner_join(table, expr); join_table(:inner, table, expr); end
+      alias join inner_join
+
+
+      # Inserts multiple values. If a block is given it is invoked for each
+      # item in the given array before inserting it.
+      def insert_multiple(array, &block)
+        if block
+          array.each {|i| insert(block[i])}
+        else
+          array.each {|i| insert(i)}
+        end
+      end
+
+      # Formats a SELECT statement using the given options and the dataset
+      # options.
+      def select_sql(opts = nil)
+        opts = opts ? @opts.merge(opts) : @opts
+
+        fields = opts[:select]
+        select_fields = fields ? field_list(fields) : WILDCARD
+        select_source = source_list(opts[:from])
+        sql = opts[:distinct] ? \
+          "SELECT DISTINCT #{select_fields} FROM #{select_source}" : \
+          "SELECT #{select_fields} FROM #{select_source}"
+
+        if join = opts[:join]
+          sql << join
+        end
+
+        if where = opts[:where]
+          sql << " WHERE #{where}"
+        end
+
+        if group = opts[:group]
+          sql << " GROUP BY #{field_list(group)}"
+        end
+
+        if order = opts[:order]
+          sql << " ORDER BY #{field_list(order)}"
+        end
+
+        if having = opts[:having]
+          sql << " HAVING #{having}"
+        end
+
+        if limit = opts[:limit]
+          sql << " LIMIT #{limit}"
+          if offset = opts[:offset]
+            sql << " OFFSET #{offset}"
+          end
+        end
+
+        if union = opts[:union]
+          sql << (opts[:union_all] ? \
+            " UNION ALL #{union.sql}" : " UNION #{union.sql}")
+        elsif intersect = opts[:intersect]
+          sql << (opts[:intersect_all] ? \
+            " INTERSECT ALL #{intersect.sql}" : " INTERSECT #{intersect.sql}")
+        elsif except = opts[:except]
+          sql << (opts[:except_all] ? \
+            " EXCEPT ALL #{except.sql}" : " EXCEPT #{except.sql}")
+        end
+
+        sql
+      end
+      alias sql select_sql
+
+      # Formats an INSERT statement using the given values. If a hash is given,
+      # the resulting statement includes field names. If no values are given,
+      # the resulting statement includes a DEFAULT VALUES clause.
+      #
+      # dataset.insert_sql() #=> 'INSERT INTO items DEFAULT VALUES'
+      # dataset.insert_sql(1,2,3) #=> 'INSERT INTO items VALUES (1, 2, 3)'
+      # dataset.insert_sql(:a => 1, :b => 2) #=>
+      # 'INSERT INTO items (a, b) VALUES (1, 2)'
+      def insert_sql(*values)
+        if values.empty?
+          "INSERT INTO #{@opts[:from]} DEFAULT VALUES"
+        elsif (values.size == 1) && values[0].is_a?(Hash)
+          field_list = []
+          value_list = []
+          values[0].each do |k, v|
+            field_list << k
+            value_list << literal(v)
+          end
+          fl = field_list.join(COMMA_SEPARATOR)
+          vl = value_list.join(COMMA_SEPARATOR)
+          "INSERT INTO #{@opts[:from]} (#{fl}) VALUES (#{vl})"
+        else
+          "INSERT INTO #{@opts[:from]} VALUES (#{literal(values)})"
+        end
+      end
+
+      # Formats an UPDATE statement using the given values.
+      #
+      # dataset.update_sql(:price => 100, :category => 'software') #=>
+      # "UPDATE items SET price = 100, category = 'software'"
+      def update_sql(values, opts = nil)
+        opts = opts ? @opts.merge(opts) : @opts
+
+        if opts[:group]
+          raise SequelError, "Can't update a grouped dataset"
+        elsif (opts[:from].size > 1) or opts[:join]
+          raise SequelError, "Can't update a joined dataset"
+        end
+
+        set_list = values.map {|k, v| "#{k} = #{literal(v)}"}.
+          join(COMMA_SEPARATOR)
+        sql = "UPDATE #{@opts[:from]} SET #{set_list}"
+
+        if where = opts[:where]
+          sql << " WHERE #{where}"
+        end
+
+        sql
+      end
+
+      # Formats a DELETE statement using the given options and dataset options.
+      #
+      # dataset.filter {price >= 100}.delete_sql #=>
+      # "DELETE FROM items WHERE (price >= 100)"
+      def delete_sql(opts = nil)
+        opts = opts ? @opts.merge(opts) : @opts
+
+        if opts[:group]
+          raise SequelError, "Can't delete from a grouped dataset"
+        elsif opts[:from].is_a?(Array) && opts[:from].size > 1
+          raise SequelError, "Can't delete from a joined dataset"
+        end
+
+        sql = "DELETE FROM #{opts[:from]}"
+
+        if where = opts[:where]
+          sql << " WHERE #{where}"
+        end
+
+        sql
+      end
+
+      # Returns a table reference for use in the FROM clause. If the dataset has
+      # only a :from option refering to a single table, only the table name is
+      # returned. Otherwise a subquery is returned.
+      def to_table_reference
+        if opts.keys == [:from] && opts[:from].size == 1
+          opts[:from].first.to_s
+        else
+          "(#{sql})"
+        end
+      end
+
+      # Returns an EXISTS clause for the dataset.
+      #
+      # dataset.exists #=> "EXISTS (SELECT 1 FROM items)"
+      def exists(opts = nil)
+        "EXISTS (#{sql({:select => [1]}.merge(opts || {}))})"
+      end
+
+      # If given an integer, the dataset will contain only the first l results.
+      # If given a range, it will contain only those at offsets within that
+      # range. If a second argument is given, it is used as an offset.
+      def limit(l, o = nil)
+        if l.is_a? Range
+          lim = (l.exclude_end? ? l.last - l.first : l.last + 1 - l.first)
+          clone_merge(:limit => lim, :offset=>l.first)
+        elsif o
+          clone_merge(:limit => l, :offset => o)
+        else
+          clone_merge(:limit => l)
+        end
+      end
+
+      SELECT_COUNT = {:select => ["COUNT(*)"], :order => nil}.freeze
+
+      # Returns the number of records in the dataset.
+      def count
+        single_value(SELECT_COUNT).to_i
+      end
+    end
+  end
+end
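
The SQL module is purely string-building: each chainable method returns a modified copy of the dataset via clone_merge, and select_sql/insert_sql/update_sql/delete_sql render the accumulated options into SQL. A minimal sketch of how the pieces compose, assuming a hypothetical DB[:items] dataset; the expected strings follow the comment examples and the select_sql/insert_sql code above:

    ds = DB[:items]

    ds.filter(:category => 'software').sql
    # => "SELECT * FROM items WHERE (category = 'software')"

    ds.filter(:category => 'software').filter {price < 100}.sql
    # => "SELECT * FROM items WHERE (category = 'software') AND (price < 100)"

    ds.order(:price).limit(10, 20).sql
    # => "SELECT * FROM items ORDER BY price LIMIT 10 OFFSET 20"

    ds.insert_sql(:price => 100)
    # => "INSERT INTO items (price) VALUES (100)"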