sequel 0.2.1.1 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/CHANGELOG +76 -0
- data/Rakefile +1 -1
- data/lib/sequel.rb +1 -1
- data/lib/sequel/ado.rb +17 -0
- data/lib/sequel/array_keys.rb +233 -0
- data/lib/sequel/connection_pool.rb +14 -0
- data/lib/sequel/core_ext.rb +3 -3
- data/lib/sequel/database.rb +25 -7
- data/lib/sequel/dataset.rb +46 -15
- data/lib/sequel/dataset/convenience.rb +27 -2
- data/lib/sequel/dataset/sequelizer.rb +2 -2
- data/lib/sequel/dataset/sql.rb +49 -18
- data/lib/sequel/dbi.rb +17 -0
- data/lib/sequel/model.rb +276 -82
- data/lib/sequel/model/base.rb +41 -30
- data/lib/sequel/model/caching.rb +42 -0
- data/lib/sequel/model/hooks.rb +113 -27
- data/lib/sequel/model/record.rb +78 -21
- data/lib/sequel/model/relations.rb +5 -0
- data/lib/sequel/model/schema.rb +11 -1
- data/lib/sequel/mysql.rb +61 -17
- data/lib/sequel/odbc.rb +42 -1
- data/lib/sequel/postgres.rb +45 -0
- data/lib/sequel/pretty_table.rb +14 -11
- data/lib/sequel/schema/schema_generator.rb +9 -3
- data/lib/sequel/sqlite.rb +33 -1
- data/spec/adapters/mysql_spec.rb +69 -15
- data/spec/adapters/postgres_spec.rb +66 -12
- data/spec/adapters/sqlite_spec.rb +113 -1
- data/spec/array_keys_spec.rb +544 -0
- data/spec/connection_pool_spec.rb +83 -0
- data/spec/database_spec.rb +81 -2
- data/spec/dataset_spec.rb +227 -9
- data/spec/model_spec.rb +392 -68
- data/spec/schema_spec.rb +7 -0
- metadata +5 -2
data/lib/sequel/dataset.rb
CHANGED
@@ -147,6 +147,7 @@ module Sequel
       @columns || []
     end
 
+    # Inserts the supplied values into the associated table.
     def <<(*args)
      insert(*args)
    end
@@ -263,6 +264,11 @@ module Sequel
       update_each_method
     end
 
+    STOCK_TRANSFORMS = {
+      :marshal => [proc {|v| Marshal.load(v)}, proc {|v| Marshal.dump(v)}],
+      :yaml => [proc {|v| YAML.load v if v}, proc {|v| v.to_yaml}]
+    }
+
     # Sets a value transform which is used to convert values loaded and saved
     # to/from the database. The transform should be supplied as a hash. Each
     # value in the hash should be an array containing two proc objects - one
@@ -277,36 +283,51 @@ module Sequel
     #
     #   dataset.insert_sql(:obj => 1234) #=>
     #   "INSERT INTO items (obj) VALUES ('\004\bi\002\322\004')"
-    #
+    #
+    # Another form of using transform is by specifying stock transforms:
+    #
+    #   dataset.transform(:obj => :marshal)
+    #
+    # The currently supported stock transforms are :marshal and :yaml.
     def transform(t)
       @transform = t
+      t.each do |k, v|
+        case v
+        when Array:
+          if (v.size != 2) || !v.first.is_a?(Proc) && !v.last.is_a?(Proc)
+            raise SequelError, "Invalid transform specified"
+          end
+        else
+          unless v = STOCK_TRANSFORMS[v]
+            raise SequelError, "Invalid transform specified"
+          else
+            t[k] = v
+          end
+        end
+      end
       update_each_method
     end
 
     # Applies the value transform for data loaded from the database.
     def transform_load(r)
-
-      k
-
-
+      @transform.each do |k, tt|
+        if r.has_key?(k)
+          r[k] = tt[0][r[k]]
+        end
       end
-
-      # @transform.each do |k, tt|
-      #   r[k] = tt[0][r[k]]
-      # end
-      # r
+      r
     end
 
     # Applies the value transform for data saved to the database.
     def transform_save(r)
-
-      k
-
-
+      @transform.each do |k, tt|
+        if r.has_key?(k)
+          r[k] = tt[1][r[k]]
+        end
       end
+      r
     end
 
-    private
     # Updates the each method according to whether @row_proc and @transform are
     # set or not.
     def update_each_method
@@ -358,6 +379,16 @@ module Sequel
         end
       end
     end
+
+    @@dataset_classes = []
+
+    def self.dataset_classes
+      @@dataset_classes
+    end
+
+    def self.inherited(c)
+      @@dataset_classes << c
+    end
   end
 end
 
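The stock transforms introduced above can be exercised as follows; this is a minimal usage sketch, assuming an already-opened database handle `DB` and an `:items` table with a text/blob column `:obj` (none of which are part of this diff):

```ruby
require 'yaml'

items = DB[:items]  # assumed dataset; DB is an already-opened Sequel database

# A symbol picks one of the canned load/dump pairs from STOCK_TRANSFORMS:
items.transform(:obj => :marshal)
items.insert(:obj => {:size => 42})   # Marshal.dump applied on save
items.first[:obj]                     # Marshal.load applied on load

# The explicit two-proc form from the docstring still works:
items.transform(:obj => [proc {|v| YAML.load(v) if v}, proc {|v| v.to_yaml}])
```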
data/lib/sequel/dataset/convenience.rb
CHANGED
@@ -3,6 +3,12 @@ require 'enumerator'
 module Sequel
   class Dataset
     module Convenience
+      # Iterates through each record, converting it into a hash.
+      def each_hash(&block)
+        each {|a| block[a.to_hash]}
+      end
+
+      # Returns true if the record count is 0
       def empty?
         count == 0
       end
@@ -24,13 +30,16 @@ module Sequel
 
       # Returns the first record in the dataset. If the num argument is specified,
       # an array is returned with the first <i>num</i> records.
-      def first(*args)
+      def first(*args, &block)
+        if block
+          return filter(&block).single_record(:limit => 1)
+        end
         args = args.empty? ? 1 : (args.size == 1) ? args.first : args
         case args
         when 1: single_record(:limit => 1)
         when Fixnum: limit(args).all
         else
-          filter(args).single_record(:limit => 1)
+          filter(args, &block).single_record(:limit => 1)
         end
       end
 
@@ -168,6 +177,22 @@ module Sequel
         Sequel::PrettyTable.print(naked.all, cols.empty? ? columns : cols)
       end
 
+      COMMA_SEPARATOR = ', '.freeze
+
+      # Returns a string in CSV format containing the dataset records. By
+      # default the CSV representation includes the column titles in the
+      # first line. You can turn that off by passing false as the
+      # include_column_titles argument.
+      def to_csv(include_column_titles = true)
+        records = naked.to_a
+        csv = ''
+        if include_column_titles
+          csv << "#{@columns.join(COMMA_SEPARATOR)}\r\n"
+        end
+        records.each {|r| csv << "#{r.join(COMMA_SEPARATOR)}\r\n"}
+        csv
+      end
+
       # Inserts multiple records into the associated table. This method can be
       # to efficiently insert a large amounts of records into a table. Inserts
       # are automatically wrapped in a transaction. If the :commit_every
data/lib/sequel/dataset/sequelizer.rb
CHANGED
@@ -40,8 +40,8 @@ class Sequel::Dataset
     case r
     when Range:
       r.exclude_end? ? \
-        "(#{l} >= #{literal(r.begin)} AND #{l} < #{literal(r.end)})" : \
-        "(#{l} >= #{literal(r.begin)} AND #{l} <= #{literal(r.end)})"
+        "(#{literal(l)} >= #{literal(r.begin)} AND #{literal(l)} < #{literal(r.end)})" : \
+        "(#{literal(l)} >= #{literal(r.begin)} AND #{literal(l)} <= #{literal(r.end)})"
     when Array:
       "(#{literal(l)} IN (#{literal(r)}))"
     when Sequel::Dataset:
data/lib/sequel/dataset/sql.rb
CHANGED
@@ -52,7 +52,9 @@ module Sequel
       if fields.empty?
         WILDCARD
       else
-        fields.map
+        fields.map do |i|
+          i.is_a?(Hash) ? i.map {|kv| "#{literal(kv[0])} AS #{kv[1]}"} : literal(i)
+        end.join(COMMA_SEPARATOR)
       end
     end
 
@@ -61,8 +63,19 @@ module Sequel
       if source.nil? || source.empty?
         raise SequelError, 'No source specified for query'
       end
-
-
+      auto_alias_count = 0
+      source.map do |i|
+        case i
+        when Dataset:
+          auto_alias_count += 1
+          i.to_table_reference(auto_alias_count)
+        when Hash:
+          i.map {|k, v| "#{k.is_a?(Dataset) ? k.to_table_reference : k} #{v}"}.
+            join(COMMA_SEPARATOR)
+        else
+          i
+        end
+      end.join(COMMA_SEPARATOR)
     end
 
     NULL = "NULL".freeze
@@ -92,7 +105,7 @@ module Sequel
       when NilClass: NULL
       when TrueClass: TRUE
       when FalseClass: FALSE
-      when Symbol: v.to_field_name
+      when Symbol: quoted_field_name(v.to_field_name)
       when Array: v.empty? ? NULL : v.map {|i| literal(i)}.join(COMMA_SEPARATOR)
       when Time: v.strftime(TIMESTAMP_FORMAT)
       when Date: v.strftime(DATE_FORMAT)
@@ -111,7 +124,10 @@ module Sequel
       case expr
       when Hash:
         parenthesize = false if expr.size == 1
-        fmt = expr.map {|i| compare_expr(i[0], i[1])}.join(AND_SEPARATOR)
+        # fmt = expr.map {|i| compare_expr(i[0], i[1])}.join(AND_SEPARATOR)
+        # N.B.: We convert this to an array and sort it in order to have a fixed order for testability.
+        # Hash in Ruby 1.8 has no order, so Hash#map is indeterminate, which makes it hard to test.
+        fmt = expr.to_a.sort_by { |k, v| k.to_s }.map {|i| compare_expr(i[0], i[1])}.join(AND_SEPARATOR)
       when Array:
         fmt = expr.shift.gsub(QUESTION_MARK) {literal(expr.shift)}
       when Proc:
@@ -154,7 +170,7 @@ module Sequel
       order(*invert_order(order.empty? ? @opts[:order] : order))
     end
 
-    DESC_ORDER_REGEXP = /(.*)\sDESC
+    DESC_ORDER_REGEXP = /(.*)\sDESC/i.freeze
 
     # Inverts the given order by breaking it into a list of field references
     # and inverting them.
@@ -167,7 +183,7 @@ module Sequel
       order.each do |f|
         f.to_s.split(',').map do |p|
           p.strip!
-          new_order << (p =~ DESC_ORDER_REGEXP ? $1 : p.to_sym.DESC)
+          new_order << ((p =~ DESC_ORDER_REGEXP ? $1 : p.to_sym.DESC).lit)
         end
       end
       new_order
@@ -312,13 +328,13 @@ module Sequel
        raise SequelError, "Invalid join type: #{type}"
      end
 
-
-
-
-
-
-
-      " #{join_type} #{table} ON #{
+      join_conditions = {}
+      expr.each do |k, v|
+        k = qualified_field_name(k, table).intern if k.is_a?(Symbol)
+        v = qualified_field_name(v, @opts[:last_joined_table] || @opts[:from].first).intern if v.is_a?(Symbol)
+        join_conditions[k] = v
+      end
+      " #{join_type} #{table} ON #{expression_list(join_conditions)}"
     end
 
     # Returns a joined dataset with the specified join type and condition.
@@ -427,6 +443,18 @@ module Sequel
       else
         values = values[0] if values.size == 1
         case values
+        when Array
+          if values.fields
+            if values.empty?
+              "INSERT INTO #{@opts[:from]} DEFAULT VALUES;"
+            else
+              fl = values.fields
+              vl = transform_save(values.values).map {|v| literal(v)}
+              "INSERT INTO #{@opts[:from]} (#{fl.join(COMMA_SEPARATOR)}) VALUES (#{vl.join(COMMA_SEPARATOR)});"
+            end
+          else
+            "INSERT INTO #{@opts[:from]} VALUES (#{literal(values)});"
+          end
         when Hash
           values = transform_save(values) if @transform
          if values.empty?
@@ -437,7 +465,7 @@ module Sequel
            "INSERT INTO #{@opts[:from]} (#{fl.join(COMMA_SEPARATOR)}) VALUES (#{vl.join(COMMA_SEPARATOR)});"
          end
        when Dataset
-          "INSERT INTO #{@opts[:from]} #{literal(values)}"
+          "INSERT INTO #{@opts[:from]} #{literal(values)};"
        else
          "INSERT INTO #{@opts[:from]} VALUES (#{literal(values)});"
        end
@@ -457,6 +485,9 @@ module Sequel
        raise SequelError, "Can't update a joined dataset"
      end
 
+      if values.is_a?(Array) && values.fields
+        values = values.to_hash
+      end
       values = transform_save(values) if @transform
       set_list = values.map {|k, v| "#{field_name(k)} = #{literal(v)}"}.
         join(COMMA_SEPARATOR)
@@ -494,11 +525,11 @@ module Sequel
     # Returns a table reference for use in the FROM clause. If the dataset has
     # only a :from option refering to a single table, only the table name is
     # returned. Otherwise a subquery is returned.
-    def to_table_reference
+    def to_table_reference(idx = nil)
       if opts.keys == [:from] && opts[:from].size == 1
         opts[:from].first.to_s
       else
-        "(#{sql})"
+        idx ? "(#{sql}) t#{idx}" : "(#{sql})"
       end
     end
 
@@ -523,7 +554,7 @@ module Sequel
       end
     end
 
-    SELECT_COUNT = {:select => ["COUNT(*)"], :order => nil}.freeze
+    SELECT_COUNT = {:select => ["COUNT(*)".lit], :order => nil}.freeze
 
     # Returns the number of records in the dataset.
     def count
data/lib/sequel/dbi.rb
CHANGED
@@ -14,6 +14,10 @@ module Sequel
       dbname = 'DBI:' + dbname unless dbname =~ /^DBI:/
       ::DBI.connect(dbname, @opts[:user], @opts[:password])
     end
+
+    def disconnect
+      @pool.disconnect {|c| c.disconnect}
+    end
 
     def dataset(opts = nil)
       DBI::Dataset.new(self, opts)
@@ -56,6 +60,19 @@ module Sequel
       self
     end
 
+    def array_tuples_fetch_rows(sql, &block)
+      @db.synchronize do
+        s = @db.execute sql
+        begin
+          @columns = s.column_names.map {|c| c.to_sym}
+          s.fetch {|r| r.fields = @columns; yield r}
+        ensure
+          s.finish rescue nil
+        end
+      end
+      self
+    end
+
     def hash_row(stmt, row)
       @columns.inject({}) do |m, c|
         m[c] = row.shift
data/lib/sequel/model.rb
CHANGED
@@ -1,82 +1,276 @@
-module Sequel
+module Sequel
+  # == Cheatsheet:
+  #   class Item < Sequel::Model(:items)
+  #     set_schema do
+  #       primary_key :id
+  #       text :name, :unique => true, :null => false
+  #       boolean :active, :default => true
+  #       integer :grade
+  #
+  #       index :grade
+  #     end
+  #   end
+  #
+  #   Item.create_table unless Item.table_exists?
+  #   Item.create_table!
+  #
+  #   i = Item.create(:name => 'Shoes', :grade => 0)
+  #
+  #   Item[1].grade #=> 0
+  #
+  #   i.set(:grade => 2)
+  #   i.grade # => 2
+  #
+  #   Item[:name => 'Shoes'].grade # => 2
+  #
+  #   i.grade = 4
+  #   Item[1].grade # => 2
+  #   i.save
+  #   Item[1].grade # => 4
+  #
+  # == Subsets
+  # Subsets are filter mapped to class methods:
+  #
+  #   class Ticket < Sequel::Model(:tickets)
+  #
+  #     subset(:pending) { finished_at == nil }
+  #     subset(:closed) { finished_at != nil }
+  #
+  #     # ...
+  #
+  #   end
+  #
+  # Now you can do:
+  #
+  #   Ticket.pending.each { |ticket| puts ticket.caption }
+  #
+  # == Advanced filtering methods (or dataset magic)
+  # One of the cool features of Sequel::Model is that it acts as a proxy to
+  # the underlying dataset, so you can invoke methods on the class instead of
+  # on the dataset:
+  #
+  #   Customer.filter(:name =~ 'Roberts')
+  #
+  # In the prevailing style of implementing models (which is actually very
+  # similar to ActiveRecord models) table-wide operations are defined as
+  # class methods:
+  #
+  #   class Node < Sequel::Model(:nodes)
+  #     def self.subtree(path)
+  #       filter(:path => Regexp.new("^#{path}(/.+)?$"))
+  #     end
+  #     def self.alarms
+  #       filter {:kind => ALARM}
+  #     end
+  #     def self.recalculate
+  #       exclude(:expression => nil).each {|n| n.calculate}
+  #     end
+  #   end
+  #
+  # The recalculate class method calls the exclude method. The exclude
+  # call is proxied to the underlying dataset, which lets you call each
+  # method separately:
+  #
+  #   Node.subtree('/test')
+  #   Node.alarms
+  #   Node.recalculate
+  #
+  # ... but this will raise a NoMethodError:
+  #
+  #   Node.subtree('/test').alarms.recalculate
+  #
+  # It turns out the solution is very simple - instead of defining class
+  # methods, define dataset methods:
+  #
+  #   class Node < Sequel::Model(:nodes)
+  #     def dataset.subtree(path)
+  #       filter(:path => Regexp.new("^#{path}(/.+)?$"))
+  #     end
+  #     def dataset.alarms
+  #       filter {:kind => ALARM}
+  #     end
+  #     def dataset.recalculate
+  #       exclude(:expression => nil).each {|n| n.calculate}
+  #     end
+  #   end
+  #
+  # Now you can mix all of these methods any way you like:
+  #
+  #   Node.filter {:stamp < Time.now < 3600}.alarms
+  #   Node.filter(:project_id => 123).subtree('/abc')
+  #   Node.subtree('/test').recalculate
+  #   # ...
+  #
+  # == Schemas
+  # You can define your schema in the Model class itself:
+  #
+  #   class Comment < Sequel::Model(:comments)
+  #     set_schema do
+  #       primary_key :id
+  #       foreign_key :post_id, :table => :posts, :on_delete => :cascade
+  #
+  #       varchar :name
+  #       varchar :email
+  #       text :comment
+  #     end
+  #
+  #     # ...
+  #
+  #   end
+  #
+  # == Hooks
+  # You can setup hooks here:
+  # * before_save calls either
+  # * before_create with
+  # * after_create or if record already exists
+  # * before_update with
+  # * after_update and finally
+  # * after_save
+  # ... and here:
+  # * before_destroy with
+  # * after_destroy
+  #
+  # ...with:
+  #
+  #   class Example < Sequel::Model(:hooks)
+  #     before_create { self.created_at = Time.now }
+  #
+  #     # ...
+  #   end
+  #
+  # == Serialization of complexe attributes
+  # Sometimes there are datatypes you can't natively map to your db. In this
+  # case you can just do serialize:
+  #
+  #   class Serialized < Sequel::Model(:serialized)
+  #     serialize :column1, :format => :yaml # YAML is the default serialization method
+  #     serialize :column2, :format => :marshal # serializes through marshalling
+  #
+  #     # ...
+  #
+  #   end
+  class Model
+    alias_method :model, :class
+  end
+end
+
+require File.join(File.dirname(__FILE__), 'model/base')
+require File.join(File.dirname(__FILE__), 'model/hooks')
+require File.join(File.dirname(__FILE__), 'model/record')
+require File.join(File.dirname(__FILE__), 'model/schema')
+require File.join(File.dirname(__FILE__), 'model/relations')
+require File.join(File.dirname(__FILE__), 'model/caching')
+
+module Sequel
+  class Model
+
+    # Defines a method that returns a filtered dataset.
+    def self.subset(name, *args, &block)
+      dataset.meta_def(name) {filter(*args, &block)}
+    end
+
+    # Comprehensive description goes here!
+    def primary_key_hash(value)
+      # stock implementation
+      {:id => value}
+    end
+
+    # Finds a single record according to the supplied filter, e.g.:
+    #
+    #   Ticket.find :author => 'Sharon' # => record
+    #   Ticket.find {:price}17 # => Dataset
+    #
+    def self.find(*args, &block)
+      dataset.filter(*args, &block).limit(1).first
+      # dataset[cond.is_a?(Hash) ? cond : primary_key_hash(cond)]
+    end
+
+    def self.[](*args)
+      args = args.first if (args.size == 1)
+      dataset[(Hash === args) ? args : primary_key_hash(args)]
+    end
+
+    # Like find but invokes create with given conditions when record does not
+    # exists.
+    def self.find_or_create(cond)
+      find(cond) || create(cond)
+    end
+
+    ############################################################################
+
+    # Like delete_all, but invokes before_destroy and after_destroy hooks if used.
+    def self.destroy_all
+      has_hooks?(:before_destroy) || has_hooks?(:after_destroy) ? \
+        dataset.destroy : dataset.delete
+    end
+    # Deletes all records.
+    def self.delete_all
+      dataset.delete
+    end
+
+    FIND_BY_REGEXP = /^find_by_(.*)/.freeze
+    FILTER_BY_REGEXP = /^filter_by_(.*)/.freeze
+    ALL_BY_REGEXP = /^all_by_(.*)/.freeze
+
+    def self.method_missing(m, *args, &block) #:nodoc:
+      Thread.exclusive do
+        method_name = m.to_s
+        if method_name =~ FIND_BY_REGEXP
+          c = $1.to_sym
+          meta_def(method_name) {|arg| find(c => arg)}
+        elsif method_name =~ FILTER_BY_REGEXP
+          c = $1.to_sym
+          meta_def(method_name) {|arg| filter(c => arg)}
+        elsif method_name =~ ALL_BY_REGEXP
+          c = $1.to_sym
+          meta_def(method_name) {|arg| filter(c => arg).all}
+        elsif dataset.respond_to?(m)
+          instance_eval("def #{m}(*args, &block); dataset.#{m}(*args, &block); end")
+        end
+      end
+      respond_to?(m) ? send(m, *args, &block) : super(m, *args)
+    end
+
+    # Comprehensive description goes here!
+    def self.join(*args)
+      table_name = dataset.opts[:from].first
+      dataset.join(*args).select(table_name.to_sym.ALL)
+    end
+
+    # Returns value of attribute.
+    def [](field)
+      @values[field]
+    end
+    # Sets value of attribute.
+    def []=(field, value)
+      @values[field] = value
+    end
+
+    # Enumerates through all attributes.
+    #
+    # === Example:
+    #   Ticket.find(7).each { |k, v| puts "#{k} => #{v}" }
+    def each(&block)
+      @values.each(&block)
+    end
+    # Returns attribute names.
+    def keys
+      @values.keys
+    end
+
+    # Returns value for <tt>:id</tt> attribute.
+    def id
+      @values[:id]
+    end
+
+    # Compares models by values.
+    def ==(obj)
+      (obj.class == model) && (obj.values == @values)
+    end
+    # Compares object by pkey.
+    def ===(obj)
+      (obj.class == model) && (obj.pkey == pkey)
+    end
+
+  end
+end
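The `method_missing` logic above defines `find_by_*`, `filter_by_*` and `all_by_*` on first use and proxies other unknown calls to the dataset. A usage sketch, with the `Post` model and its columns assumed for illustration:

```ruby
class Post < Sequel::Model(:posts)  # assumed table with :title and :author columns
end

Post.find_by_title('Hello')      # defined on demand; same as Post.find(:title => 'Hello')
Post.filter_by_author('Sharon')  # returns a filtered dataset
Post.all_by_author('Sharon')     # returns an array of matching records

Post.count                       # unknown methods the dataset supports are proxied to it

Post.find_or_create(:title => 'Hello')  # finds the record or creates it
```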