sequel 3.10.0 → 3.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. data/CHANGELOG +68 -0
  2. data/COPYING +1 -1
  3. data/README.rdoc +87 -27
  4. data/bin/sequel +2 -4
  5. data/doc/association_basics.rdoc +1383 -0
  6. data/doc/dataset_basics.rdoc +106 -0
  7. data/doc/opening_databases.rdoc +45 -16
  8. data/doc/querying.rdoc +210 -0
  9. data/doc/release_notes/3.11.0.txt +254 -0
  10. data/doc/virtual_rows.rdoc +217 -31
  11. data/lib/sequel/adapters/ado.rb +28 -12
  12. data/lib/sequel/adapters/ado/mssql.rb +33 -1
  13. data/lib/sequel/adapters/amalgalite.rb +13 -8
  14. data/lib/sequel/adapters/db2.rb +1 -2
  15. data/lib/sequel/adapters/dbi.rb +7 -4
  16. data/lib/sequel/adapters/do.rb +14 -15
  17. data/lib/sequel/adapters/do/postgres.rb +4 -5
  18. data/lib/sequel/adapters/do/sqlite.rb +9 -0
  19. data/lib/sequel/adapters/firebird.rb +5 -10
  20. data/lib/sequel/adapters/informix.rb +2 -4
  21. data/lib/sequel/adapters/jdbc.rb +111 -49
  22. data/lib/sequel/adapters/jdbc/mssql.rb +1 -2
  23. data/lib/sequel/adapters/jdbc/mysql.rb +11 -0
  24. data/lib/sequel/adapters/jdbc/oracle.rb +4 -7
  25. data/lib/sequel/adapters/jdbc/postgresql.rb +8 -1
  26. data/lib/sequel/adapters/jdbc/sqlite.rb +12 -0
  27. data/lib/sequel/adapters/mysql.rb +14 -5
  28. data/lib/sequel/adapters/odbc.rb +2 -4
  29. data/lib/sequel/adapters/odbc/mssql.rb +2 -4
  30. data/lib/sequel/adapters/openbase.rb +1 -2
  31. data/lib/sequel/adapters/oracle.rb +4 -8
  32. data/lib/sequel/adapters/postgres.rb +4 -11
  33. data/lib/sequel/adapters/shared/mssql.rb +22 -9
  34. data/lib/sequel/adapters/shared/mysql.rb +33 -30
  35. data/lib/sequel/adapters/shared/oracle.rb +0 -5
  36. data/lib/sequel/adapters/shared/postgres.rb +13 -11
  37. data/lib/sequel/adapters/shared/sqlite.rb +56 -10
  38. data/lib/sequel/adapters/sqlite.rb +16 -9
  39. data/lib/sequel/connection_pool.rb +6 -1
  40. data/lib/sequel/connection_pool/single.rb +1 -0
  41. data/lib/sequel/core.rb +6 -1
  42. data/lib/sequel/database.rb +52 -23
  43. data/lib/sequel/database/schema_generator.rb +6 -0
  44. data/lib/sequel/database/schema_methods.rb +5 -5
  45. data/lib/sequel/database/schema_sql.rb +1 -1
  46. data/lib/sequel/dataset.rb +4 -190
  47. data/lib/sequel/dataset/actions.rb +323 -1
  48. data/lib/sequel/dataset/features.rb +18 -2
  49. data/lib/sequel/dataset/graph.rb +7 -0
  50. data/lib/sequel/dataset/misc.rb +119 -0
  51. data/lib/sequel/dataset/mutation.rb +64 -0
  52. data/lib/sequel/dataset/prepared_statements.rb +6 -0
  53. data/lib/sequel/dataset/query.rb +272 -6
  54. data/lib/sequel/dataset/sql.rb +186 -394
  55. data/lib/sequel/model.rb +4 -2
  56. data/lib/sequel/model/associations.rb +31 -14
  57. data/lib/sequel/model/base.rb +32 -13
  58. data/lib/sequel/model/exceptions.rb +8 -4
  59. data/lib/sequel/model/plugins.rb +3 -13
  60. data/lib/sequel/plugins/active_model.rb +26 -7
  61. data/lib/sequel/plugins/instance_filters.rb +98 -0
  62. data/lib/sequel/plugins/many_through_many.rb +1 -1
  63. data/lib/sequel/plugins/optimistic_locking.rb +25 -9
  64. data/lib/sequel/version.rb +1 -1
  65. data/spec/adapters/mssql_spec.rb +26 -0
  66. data/spec/adapters/mysql_spec.rb +33 -4
  67. data/spec/adapters/postgres_spec.rb +24 -1
  68. data/spec/adapters/spec_helper.rb +6 -0
  69. data/spec/adapters/sqlite_spec.rb +28 -0
  70. data/spec/core/connection_pool_spec.rb +17 -5
  71. data/spec/core/database_spec.rb +101 -1
  72. data/spec/core/dataset_spec.rb +42 -4
  73. data/spec/core/schema_spec.rb +13 -0
  74. data/spec/extensions/active_model_spec.rb +34 -11
  75. data/spec/extensions/caching_spec.rb +2 -0
  76. data/spec/extensions/instance_filters_spec.rb +55 -0
  77. data/spec/extensions/spec_helper.rb +2 -0
  78. data/spec/integration/dataset_test.rb +12 -1
  79. data/spec/integration/model_test.rb +12 -0
  80. data/spec/integration/plugin_test.rb +61 -1
  81. data/spec/integration/schema_test.rb +14 -3
  82. data/spec/model/base_spec.rb +27 -0
  83. data/spec/model/plugins_spec.rb +0 -22
  84. data/spec/model/record_spec.rb +32 -1
  85. data/spec/model/spec_helper.rb +2 -0
  86. metadata +14 -3
  87. data/lib/sequel/dataset/convenience.rb +0 -326
@@ -11,6 +11,9 @@ module Sequel
  # Schema::Generator has some methods but also includes method_missing,
  # allowing users to specify column type as a method instead of using
  # the column method, which makes for a nicer DSL.
+ #
+ # For more information on Sequel's support for schema modification, see
+ # the {"Schema Modification" guide}[link:files/doc/schema_rdoc.html].
  class Generator
  # Classes specifying generic types that Sequel will convert to database-specific types.
  GENERIC_TYPES=[String, Integer, Fixnum, Bignum, Float, Numeric, BigDecimal,
@@ -200,6 +203,9 @@ module Sequel
  # object and a block of operations to perform on the table, and
  # gives the Database a table an array of operations, which the database uses to
  # alter a table's description.
+ #
+ # For more information on Sequel's support for schema modification, see
+ # the {"Schema Modification" guide}[link:files/doc/schema_rdoc.html].
  class AlterTableGenerator
  # An array of DDL operations to perform
  attr_reader :operations
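
As a quick illustration of the Generator DSL described in the comments above (the table and column names here are hypothetical, not taken from the diff):

  # method_missing lets the generic Ruby classes listed in GENERIC_TYPES stand in
  # for column types, so String/Integer below become database-specific types.
  DB.create_table(:items) do
    primary_key :id
    String  :name, :null => false   # equivalent to: column :name, String, :null => false
    Integer :price
  end
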
@@ -46,7 +46,7 @@ module Sequel
  # definitions using create_table, and #add_index accepts all the options
  # available for index definition.
  #
- # See Schema::AlterTableGenerator.
+ # See Schema::AlterTableGenerator and the {"Schema Modification" guide}[link:files/doc/schema_rdoc.html].
  def alter_table(name, generator=nil, &block)
  remove_cached_schema(name)
  generator ||= Schema::AlterTableGenerator.new(self, &block)
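
For reference, a minimal alter_table call using the AlterTableGenerator block form (hypothetical table and columns, not from the diff):

  DB.alter_table(:items) do
    add_column :category, String   # same column definition options as create_table
    add_index  :category           # add_index accepts the usual index options, e.g. :unique => true
  end
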
@@ -66,7 +66,7 @@ module Sequel
  # * :temp - Create the table as a temporary table.
  # * :ignore_index_errors - Ignore any errors when creating indexes.
  #
- # See Schema::Generator.
+ # See Schema::Generator and the {"Schema Modification" guide}[link:files/doc/schema_rdoc.html].
  def create_table(name, options={}, &block)
  options = {:generator=>options} if options.is_a?(Schema::Generator)
  generator = options[:generator] || Schema::Generator.new(self, &block)
@@ -191,10 +191,10 @@ module Sequel
  # Execute the create index statements using the generator.
  def create_table_indexes_from_generator(name, generator, options)
  e = options[:ignore_index_errors]
- index_sql_list(name, generator.indexes).each do |sql|
+ generator.indexes.each do |index|
  begin
- execute_ddl(sql)
- rescue DatabaseError
+ index_sql_list(name, [index]).each{|sql| execute_ddl(sql)}
+ rescue Error
  raise unless e
  end
  end
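
The hunk above moves the SQL generation for each index inside the begin/rescue and rescues the broader Error class instead of only DatabaseError, so with :ignore_index_errors a problem with one index is less likely to abort the remaining ones. A hedged usage sketch (hypothetical schema):

  DB.create_table(:items, :ignore_index_errors => true) do
    primary_key :id
    String :code
    index :code, :unique => true   # if this index fails, table creation still succeeds
  end
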
@@ -69,7 +69,7 @@ module Sequel
  def column_definition_sql(column)
  sql = "#{quote_identifier(column[:name])} #{type_literal(column)}"
  sql << UNIQUE if column[:unique]
- null = column.include?(:null) ? column[:null] : column[:allow_null]
+ null = column.fetch(:null, column[:allow_null])
  sql << NOT_NULL if null == false
  sql << NULL if null == true
  sql << " DEFAULT #{literal(column[:default])}" if column.include?(:default)
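
Hash#fetch with a default preserves the old include? semantics: an explicitly supplied :null key wins even when its value is false or nil, and :allow_null is only consulted when :null is absent. A standalone Ruby sketch (the hashes are illustrative):

  column = {:name => :active, :null => false, :allow_null => true}
  column.fetch(:null, column[:allow_null])   # => false (explicit :null wins)

  column = {:name => :active, :allow_null => true}
  column.fetch(:null, column[:allow_null])   # => true (falls back to :allow_null)
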
@@ -20,199 +20,13 @@ module Sequel
  #
  # Datasets are Enumerable objects, so they can be manipulated using any
  # of the Enumerable methods, such as map, inject, etc.
+ #
+ # For more information, see the {"Dataset Basics" guide}[link:files/doc/dataset_basics_rdoc.html].
  class Dataset
  extend Metaprogramming
  include Metaprogramming
  include Enumerable
-
- # The dataset options that require the removal of cached columns
- # if changed.
- COLUMN_CHANGE_OPTS = [:select, :sql, :from, :join].freeze
-
- # All methods that should have a ! method added that modifies
- # the receiver.
- MUTATION_METHODS = %w'add_graph_aliases and cross_join distinct except exclude
- filter for_update from from_self full_join full_outer_join graph
- group group_and_count group_by having inner_join intersect invert join join_table left_join
- left_outer_join limit lock_style naked natural_full_join natural_join
- natural_left_join natural_right_join or order order_by order_more paginate qualify query
- reverse reverse_order right_join right_outer_join select select_all select_more server
- set_defaults set_graph_aliases set_overrides unfiltered ungraphed ungrouped union
- unlimited unordered where with with_recursive with_sql'.collect{|x| x.to_sym}
-
- # Which options don't affect the SQL generation. Used by simple_select_all?
- # to determine if this is a simple SELECT * FROM table.
- NON_SQL_OPTIONS = [:server, :defaults, :overrides, :graph, :eager_graph, :graph_aliases]
-
- NOTIMPL_MSG = "This method must be overridden in Sequel adapters".freeze
- WITH_SUPPORTED=:select_with_sql
-
- # The database that corresponds to this dataset
- attr_accessor :db
-
- # Set the method to call on identifiers going into the database for this dataset
- attr_accessor :identifier_input_method
-
- # Set the method to call on identifiers coming the database for this dataset
- attr_accessor :identifier_output_method
-
- # The hash of options for this dataset, keys are symbols.
- attr_accessor :opts
-
- # Whether to quote identifiers for this dataset
- attr_writer :quote_identifiers
-
- # The row_proc for this database, should be a Proc that takes
- # a single hash argument and returns the object you want
- # each to return.
- attr_accessor :row_proc
-
- # Constructs a new Dataset instance with an associated database and
- # options. Datasets are usually constructed by invoking the Database#[] method:
- #
- # DB[:posts]
- #
- # Sequel::Dataset is an abstract class that is not useful by itself. Each
- # database adaptor should provide a subclass of Sequel::Dataset, and have
- # the Database#dataset method return an instance of that class.
- def initialize(db, opts = nil)
- @db = db
- @quote_identifiers = db.quote_identifiers? if db.respond_to?(:quote_identifiers?)
- @identifier_input_method = db.identifier_input_method if db.respond_to?(:identifier_input_method)
- @identifier_output_method = db.identifier_output_method if db.respond_to?(:identifier_output_method)
- @opts = opts || {}
- @row_proc = nil
- end
-
- ### Class Methods ###
-
- # Setup mutation (e.g. filter!) methods. These operate the same as the
- # non-! methods, but replace the options of the current dataset with the
- # options of the resulting dataset.
- def self.def_mutation_method(*meths)
- meths.each do |meth|
- class_eval("def #{meth}!(*args, &block); mutation_method(:#{meth}, *args, &block) end", __FILE__, __LINE__)
- end
- end
-
- ### Instance Methods ###
-
- # Return the dataset as an aliased expression with the given alias. You can
- # use this as a FROM or JOIN dataset, or as a column if this dataset
- # returns a single row and column.
- def as(aliaz)
- ::Sequel::SQL::AliasedExpression.new(self, aliaz)
- end
-
- # Returns a new clone of the dataset with with the given options merged.
- # If the options changed include options in COLUMN_CHANGE_OPTS, the cached
- # columns are deleted.
- def clone(opts = {})
- c = super()
- c.opts = @opts.merge(opts)
- c.instance_variable_set(:@columns, nil) if opts.keys.any?{|o| COLUMN_CHANGE_OPTS.include?(o)}
- c
- end
-
- # Add a mutation method to this dataset instance.
- def def_mutation_method(*meths)
- meths.each do |meth|
- instance_eval("def #{meth}!(*args, &block); mutation_method(:#{meth}, *args, &block) end", __FILE__, __LINE__)
- end
- end
-
- # Yield a dataset for each server in the connection pool that is tied to that server.
- # Intended for use in sharded environments where all servers need to be modified
- # with the same data:
- #
- # DB[:configs].where(:key=>'setting').each_server{|ds| ds.update(:value=>'new_value')}
- def each_server
- db.servers.each{|s| yield server(s)}
- end
-
- # Returns a string representation of the dataset including the class name
- # and the corresponding SQL select statement.
- def inspect
- "#<#{self.class}: #{sql.inspect}>"
- end
-
- # Returns a naked dataset clone - i.e. a dataset that returns records as
- # hashes instead of calling the row proc.
- def naked
- ds = clone
- ds.row_proc = nil
- ds
- end
-
- # Set the server for this dataset to use. Used to pick a specific database
- # shard to run a query against, or to override the default (which is SELECT uses
- # :read_only database and all other queries use the :default database).
- def server(servr)
- clone(:server=>servr)
- end
-
- # Set the default values for insert and update statements. The values hash passed
- # to insert or update are merged into this hash.
- def set_defaults(hash)
- clone(:defaults=>(@opts[:defaults]||{}).merge(hash))
- end
-
- # Set values that override hash arguments given to insert and update statements.
- # This hash is merged into the hash provided to insert or update.
- def set_overrides(hash)
- clone(:overrides=>hash.merge(@opts[:overrides]||{}))
- end
-
- # Add the mutation methods via metaprogramming
- def_mutation_method(*MUTATION_METHODS)
-
- protected
-
- # Return true if the dataset has a non-nil value for any key in opts.
- def options_overlap(opts)
- !(@opts.collect{|k,v| k unless v.nil?}.compact & opts).empty?
- end
-
- # Whether this dataset is a simple SELECT * FROM table.
- def simple_select_all?
- o = @opts.reject{|k,v| v.nil? || NON_SQL_OPTIONS.include?(k)}
- o.length == 1 && (f = o[:from]) && f.length == 1 && f.first.is_a?(Symbol)
- end
-
- private
-
- # Set the server to use to :default unless it is already set in the passed opts
- def default_server_opts(opts)
- {:server=>@opts[:server] || :default}.merge(opts)
- end
-
- # Modify the identifier returned from the database based on the
- # identifier_output_method.
- def input_identifier(v)
- (i = identifier_input_method) ? v.to_s.send(i) : v.to_s
- end
-
- # Modify the receiver with the results of sending the meth, args, and block
- # to the receiver and merging the options of the resulting dataset into
- # the receiver's options.
- def mutation_method(meth, *args, &block)
- copy = send(meth, *args, &block)
- @opts.merge!(copy.opts)
- self
- end
-
- # Modify the identifier returned from the database based on the
- # identifier_output_method.
- def output_identifier(v)
- v = 'untitled' if v == ''
- (i = identifier_output_method) ? v.to_s.send(i).to_sym : v.to_sym
- end
-
- # This is run inside .all, after all of the records have been loaded
- # via .each, but before any block passed to all is called. It is called with
- # a single argument, an array of all returned records. Does nothing by
- # default, added to make the model eager loading code simpler.
- def post_load(all_records)
- end
  end
+
+ require(%w"query actions features graph prepared_statements misc mutation sql", 'dataset')
  end
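
The removed code above is relocated rather than dropped: the require line pulls it back in from the new dataset/query.rb, dataset/actions.rb, dataset/mutation.rb, dataset/misc.rb and related files listed in the file table. Assuming the behavior of the generated bang methods and the insert/update defaults is unchanged, usage still looks like this (hypothetical table):

  ds = DB[:items]
  ds.filter!(:active => true)            # bang methods mutate the receiver's options in place
  ds = ds.set_defaults(:active => true)  # defaults are merged into insert/update hashes
  ds.insert(:name => 'widget')
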
@@ -1,10 +1,33 @@
  module Sequel
  class Dataset
+ # ---------------------
+ # :section: Methods that execute code on the database
+ # These methods all execute the dataset's SQL on the database.
+ # They don't return modified datasets, so if used in a method chain
+ # they should be the last method called.
+ # ---------------------
+
  # Alias for insert, but not aliased directly so subclasses
  # don't have to override both methods.
  def <<(*args)
  insert(*args)
  end
+
+ # Returns the first record matching the conditions. Examples:
+ #
+ # ds[:id=>1] => {:id=>1}
+ def [](*conditions)
+ raise(Error, ARRAY_ACCESS_ERROR_MSG) if (conditions.length == 1 and conditions.first.is_a?(Integer)) or conditions.length == 0
+ first(*conditions)
+ end
+
+ # Update all records matching the conditions
+ # with the values specified. Examples:
+ #
+ # ds[:id=>1] = {:id=>2} # SQL: UPDATE ... SET id = 2 WHERE id = 1
+ def []=(conditions, values)
+ filter(conditions).update(values)
+ end

  # Returns an array with all records in the dataset. If a block is given,
  # the array is iterated over after all items have been loaded.
@@ -15,6 +38,11 @@ module Sequel
  a.each(&block) if block
  a
  end
+
+ # Returns the average value for the given column.
+ def avg(column)
+ aggregate_dataset.get{avg(column)}
+ end

  # Returns the columns in the result set in order.
  # If the columns are currently cached, returns the cached value. Otherwise,
@@ -32,7 +60,7 @@ module Sequel
  @columns = ds.instance_variable_get(:@columns)
  @columns || []
  end
-
+
  # Remove the cached list of columns and do a SELECT query to find
  # the columns.
  def columns!
@@ -40,6 +68,11 @@ module Sequel
  columns
  end

+ # Returns the number of records in the dataset.
+ def count
+ aggregate_dataset.get{COUNT(:*){}.as(count)}.to_i
+ end
+
  # Deletes the records in the dataset. The returned value is generally the
  # number of records deleted, but that is adapter dependent. See delete_sql.
  def delete
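
Usage of the aggregate helpers added above, assuming a hypothetical items table (the SQL in the comments is approximate):

  DB[:items].count        # roughly: SELECT count(*) AS count FROM items
  DB[:items].avg(:price)  # roughly: SELECT avg(price) FROM items
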
@@ -62,12 +95,102 @@ module Sequel
  end
  self
  end
+
+ # Returns true if no records exist in the dataset, false otherwise
+ def empty?
+ get(1).nil?
+ end

  # Executes a select query and fetches records, passing each record to the
  # supplied block. The yielded records should be hashes with symbol keys.
  def fetch_rows(sql, &block)
  raise NotImplementedError, NOTIMPL_MSG
  end
+
+ # If an integer argument is
+ # given, it is interpreted as a limit, and then returns all
+ # matching records up to that limit. If no argument is passed,
+ # it returns the first matching record. If any other type of
+ # argument(s) is passed, it is given to filter and the
+ # first matching record is returned. If a block is given, it is used
+ # to filter the dataset before returning anything. Examples:
+ #
+ # ds.first => {:id=>7}
+ # ds.first(2) => [{:id=>6}, {:id=>4}]
+ # ds.order(:id).first(2) => [{:id=>1}, {:id=>2}]
+ # ds.first(:id=>2) => {:id=>2}
+ # ds.first("id = 3") => {:id=>3}
+ # ds.first("id = ?", 4) => {:id=>4}
+ # ds.first{|o| o.id > 2} => {:id=>5}
+ # ds.order(:id).first{|o| o.id > 2} => {:id=>3}
+ # ds.first{|o| o.id > 2} => {:id=>5}
+ # ds.first("id > ?", 4){|o| o.id < 6} => {:id=>5}
+ # ds.order(:id).first(2){|o| o.id < 2} => [{:id=>1}]
+ def first(*args, &block)
+ ds = block ? filter(&block) : self
+
+ if args.empty?
+ ds.single_record
+ else
+ args = (args.size == 1) ? args.first : args
+ if Integer === args
+ ds.limit(args).all
+ else
+ ds.filter(args).single_record
+ end
+ end
+ end
+
+ # Return the column value for the first matching record in the dataset.
+ # Raises an error if both an argument and a block are given.
+ #
+ # ds.get(:id)
+ # ds.get{|o| o.sum(:id)}
+ def get(column=nil, &block)
+ if column
+ raise(Error, ARG_BLOCK_ERROR_MSG) if block
+ select(column).single_value
+ else
+ select(&block).single_value
+ end
+ end
+
+ # Inserts multiple records into the associated table. This method can be
+ # used to efficiently insert a large number of records into a table. Inserts
+ # are automatically wrapped in a transaction.
+ #
+ # This method is called with a columns array and an array of value arrays:
+ #
+ # dataset.import([:x, :y], [[1, 2], [3, 4]])
+ #
+ # This method also accepts a dataset instead of an array of value arrays:
+ #
+ # dataset.import([:x, :y], other_dataset.select(:a___x, :b___y))
+ #
+ # The method also accepts a :slice or :commit_every option that specifies
+ # the number of records to insert per transaction. This is especially useful
+ # when inserting a large number of records, e.g.:
+ #
+ # # this will commit every 50 records
+ # dataset.import([:x, :y], [[1, 2], [3, 4], ...], :slice => 50)
+ def import(columns, values, opts={})
+ return @db.transaction{insert(columns, values)} if values.is_a?(Dataset)
+
+ return if values.empty?
+ raise(Error, IMPORT_ERROR_MSG) if columns.empty?
+
+ if slice_size = opts[:commit_every] || opts[:slice]
+ offset = 0
+ loop do
+ @db.transaction(opts){multi_insert_sql(columns, values[offset, slice_size]).each{|st| execute_dui(st)}}
+ offset += slice_size
+ break if offset >= values.length
+ end
+ else
+ statements = multi_insert_sql(columns, values)
+ @db.transaction{statements.each{|st| execute_dui(st)}}
+ end
+ end

  # Inserts values into the associated table. The returned value is generally
  # the value of the primary key for the inserted row, but that is adapter dependent.
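
A few hedged usage examples for the methods in the hunk above (table, columns, and data are hypothetical):

  ds = DB[:items]
  ds.empty?                     # true if no rows match
  ds.first(:name => 'widget')   # first row matching the filter, or nil
  ds.get{max(:price)}           # single value from a virtual-row expression
  # batch insert, committing every 100 rows
  ds.import([:name, :price], [['a', 1], ['b', 2]], :commit_every => 100)
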
@@ -75,12 +198,168 @@ module Sequel
  def insert(*values)
  execute_insert(insert_sql(*values))
  end
+
+ # Inserts multiple values. If a block is given it is invoked for each
+ # item in the given array before inserting it. See #multi_insert as
+ # a possible faster version that inserts multiple records in one
+ # SQL statement.
+ def insert_multiple(array, &block)
+ if block
+ array.each {|i| insert(block[i])}
+ else
+ array.each {|i| insert(i)}
+ end
+ end
+
+ # Returns the interval between minimum and maximum values for the given
+ # column.
+ def interval(column)
+ aggregate_dataset.get{max(column) - min(column)}
+ end
+
+ # Reverses the order and then runs first. Note that this
+ # will not necessarily give you the last record in the dataset,
+ # unless you have an unambiguous order. If there is not
+ # currently an order for this dataset, raises an Error.
+ def last(*args, &block)
+ raise(Error, 'No order specified') unless @opts[:order]
+ reverse.first(*args, &block)
+ end
+
+ # Maps column values for each record in the dataset (if a column name is
+ # given), or performs the stock mapping functionality of Enumerable.
+ # Raises an error if both an argument and block are given. Examples:
+ #
+ # ds.map(:id) => [1, 2, 3, ...]
+ # ds.map{|r| r[:id] * 2} => [2, 4, 6, ...]
+ def map(column=nil, &block)
+ if column
+ raise(Error, ARG_BLOCK_ERROR_MSG) if block
+ super(){|r| r[column]}
+ else
+ super(&block)
+ end
+ end
+
+ # Returns the maximum value for the given column.
+ def max(column)
+ aggregate_dataset.get{max(column)}
+ end
+
+ # Returns the minimum value for the given column.
+ def min(column)
+ aggregate_dataset.get{min(column)}
+ end
+
+ # This is a front end for import that allows you to submit an array of
+ # hashes instead of arrays of columns and values:
+ #
+ # dataset.multi_insert([{:x => 1}, {:x => 2}])
+ #
+ # Be aware that all hashes should have the same keys if you use this calling method,
+ # otherwise some columns could be missed or set to null instead of to default
+ # values.
+ #
+ # You can also use the :slice or :commit_every option that import accepts.
+ def multi_insert(hashes, opts={})
+ return if hashes.empty?
+ columns = hashes.first.keys
+ import(columns, hashes.map{|h| columns.map{|c| h[c]}}, opts)
+ end
+
+ # Returns a Range object made from the minimum and maximum values for the
+ # given column.
+ def range(column)
+ if r = aggregate_dataset.select{[min(column).as(v1), max(column).as(v2)]}.first
+ (r[:v1]..r[:v2])
+ end
+ end
+
+ # Returns a hash with key_column values as keys and value_column values as
+ # values. Similar to to_hash, but only selects the two columns.
+ def select_hash(key_column, value_column)
+ select(key_column, value_column).to_hash(hash_key_symbol(key_column), hash_key_symbol(value_column))
+ end
+
+ # Selects the column given (either as an argument or as a block), and
+ # returns an array of all values of that column in the dataset. If you
+ # give a block argument that returns an array with multiple entries,
+ # the contents of the resulting array are undefined.
+ def select_map(column=nil, &block)
+ ds = naked.ungraphed
+ ds = if column
+ raise(Error, ARG_BLOCK_ERROR_MSG) if block
+ ds.select(column)
+ else
+ ds.select(&block)
+ end
+ ds.map{|r| r.values.first}
+ end
+
+ # The same as select_map, but in addition orders the array by the column.
+ def select_order_map(column=nil, &block)
+ ds = naked.ungraphed
+ ds = if column
+ raise(Error, ARG_BLOCK_ERROR_MSG) if block
+ ds.select(column).order(unaliased_identifier(column))
+ else
+ ds.select(&block).order(&block)
+ end
+ ds.map{|r| r.values.first}
+ end

  # Alias for update, but not aliased directly so subclasses
  # don't have to override both methods.
  def set(*args)
  update(*args)
  end
+
+ # Returns the first record in the dataset.
+ def single_record
+ clone(:limit=>1).each{|r| return r}
+ nil
+ end
+
+ # Returns the first value of the first record in the dataset.
+ # Returns nil if dataset is empty.
+ def single_value
+ if r = naked.ungraphed.single_record
+ r.values.first
+ end
+ end
+
+ # Returns the sum for the given column.
+ def sum(column)
+ aggregate_dataset.get{sum(column)}
+ end
+
+ # Returns a string in CSV format containing the dataset records. By
+ # default the CSV representation includes the column titles in the
+ # first line. You can turn that off by passing false as the
+ # include_column_titles argument.
+ #
+ # This does not use a CSV library or handle quoting of values in
+ # any way. If any values in any of the rows could include commas or line
+ # endings, you shouldn't use this.
+ def to_csv(include_column_titles = true)
+ n = naked
+ cols = n.columns
+ csv = ''
+ csv << "#{cols.join(COMMA_SEPARATOR)}\r\n" if include_column_titles
+ n.each{|r| csv << "#{cols.collect{|c| r[c]}.join(COMMA_SEPARATOR)}\r\n"}
+ csv
+ end
+
+ # Returns a hash with one column used as key and another used as value.
+ # If rows have duplicate values for the key column, the latter row(s)
+ # will overwrite the value of the previous row(s). If the value_column
+ # is not given or nil, uses the entire hash as the value.
+ def to_hash(key_column, value_column = nil)
+ inject({}) do |m, r|
+ m[r[key_column]] = value_column ? r[value_column] : r
+ m
+ end
+ end

  # Truncates the dataset. Returns nil.
  def truncate
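
Hedged examples of the mapping helpers added above (hypothetical data; return values are illustrative):

  DB[:items].select_map(:name)         # ['a', 'b', ...]
  DB[:items].select_hash(:id, :name)   # {1 => 'a', 2 => 'b', ...}
  DB[:items].to_hash(:id)              # {1 => {:id => 1, :name => 'a', ...}, ...}
  DB[:items].range(:price)             # 10..200, a Ruby Range
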
@@ -94,6 +373,11 @@ module Sequel
  end

  private
+
+ # Set the server to use to :default unless it is already set in the passed opts
+ def default_server_opts(opts)
+ {:server=>@opts[:server] || :default}.merge(opts)
+ end

  # Execute the given SQL on the database using execute.
  def execute(sql, opts={}, &block)
@@ -115,5 +399,43 @@ module Sequel
  def execute_insert(sql, opts={}, &block)
  @db.execute_insert(sql, default_server_opts(opts), &block)
  end
+
+ # Return a plain symbol given a potentially qualified or aliased symbol,
+ # specifying the symbol that is likely to be used as the hash key
+ # for the column when records are returned.
+ def hash_key_symbol(s)
+ raise(Error, "#{s.inspect} is not a symbol") unless s.is_a?(Symbol)
+ _, c, a = split_symbol(s)
+ (a || c).to_sym
+ end
+
+ # Modify the identifier returned from the database based on the
+ # identifier_output_method.
+ def output_identifier(v)
+ v = 'untitled' if v == ''
+ (i = identifier_output_method) ? v.to_s.send(i).to_sym : v.to_sym
+ end
+
+ # This is run inside .all, after all of the records have been loaded
+ # via .each, but before any block passed to all is called. It is called with
+ # a single argument, an array of all returned records. Does nothing by
+ # default, added to make the model eager loading code simpler.
+ def post_load(all_records)
+ end
+
+ # Return the unaliased part of the identifier. Handles both
+ # implicit aliases in symbols, as well as SQL::AliasedExpression
+ # objects. Other objects are returned as is.
+ def unaliased_identifier(c)
+ case c
+ when Symbol
+ c_table, column, _ = split_symbol(c)
+ c_table ? column.to_sym.qualify(c_table) : column.to_sym
+ when SQL::AliasedExpression
+ c.expression
+ else
+ c
+ end
+ end
  end
  end
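
These private helpers are what let the public methods above accept Sequel's implicit qualifier/alias symbol syntax; a hedged sketch with a hypothetical schema:

  # :items__name___n means items.name AS n; select_order_map orders by the
  # unaliased items.name, while hash_key_symbol makes the result key :n.
  DB[:items].select_order_map(:items__name___n)
  DB[:items].select_hash(:id, :items__name___n)   # {1 => 'a', ...}, keyed by :id
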