sequel 4.2.0 → 4.3.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (39) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG +28 -0
  3. data/doc/extensions.rdoc +84 -0
  4. data/doc/model_plugins.rdoc +270 -0
  5. data/doc/release_notes/4.3.0.txt +40 -0
  6. data/doc/testing.rdoc +3 -0
  7. data/lib/sequel/adapters/jdbc/as400.rb +4 -0
  8. data/lib/sequel/adapters/shared/mysql.rb +6 -1
  9. data/lib/sequel/adapters/shared/postgres.rb +2 -0
  10. data/lib/sequel/ast_transformer.rb +2 -0
  11. data/lib/sequel/extensions/error_sql.rb +71 -0
  12. data/lib/sequel/extensions/migration.rb +0 -1
  13. data/lib/sequel/extensions/pagination.rb +6 -2
  14. data/lib/sequel/extensions/pg_array.rb +12 -5
  15. data/lib/sequel/extensions/pg_hstore.rb +5 -3
  16. data/lib/sequel/extensions/pg_inet.rb +3 -3
  17. data/lib/sequel/extensions/pg_interval.rb +3 -3
  18. data/lib/sequel/extensions/pg_json.rb +3 -3
  19. data/lib/sequel/extensions/pg_range.rb +3 -3
  20. data/lib/sequel/extensions/pg_row.rb +3 -3
  21. data/lib/sequel/extensions/server_block.rb +11 -3
  22. data/lib/sequel/plugins/rcte_tree.rb +59 -39
  23. data/lib/sequel/plugins/tree.rb +13 -6
  24. data/lib/sequel/sql.rb +1 -1
  25. data/lib/sequel/version.rb +1 -1
  26. data/spec/adapters/postgres_spec.rb +17 -0
  27. data/spec/core/dataset_spec.rb +14 -0
  28. data/spec/core/schema_spec.rb +1 -0
  29. data/spec/extensions/error_sql_spec.rb +20 -0
  30. data/spec/extensions/migration_spec.rb +15 -0
  31. data/spec/extensions/pagination_spec.rb +19 -0
  32. data/spec/extensions/pg_array_spec.rb +3 -2
  33. data/spec/extensions/rcte_tree_spec.rb +135 -0
  34. data/spec/extensions/tree_spec.rb +130 -0
  35. data/spec/integration/database_test.rb +5 -0
  36. data/spec/integration/dataset_test.rb +4 -0
  37. data/spec/integration/plugin_test.rb +163 -177
  38. data/spec/integration/spec_helper.rb +4 -0
  39. metadata +10 -2
@@ -31,6 +31,10 @@ module Sequel
31
31
 
32
32
  private
33
33
 
34
+ def disconnect_error?(exception, opts)
35
+ super || exception.message =~ /\AThe connection does not exist\./
36
+ end
37
+
34
38
  # Use JDBC connection's setAutoCommit to false to start transactions
35
39
  def begin_transaction(conn, opts=OPTS)
36
40
  set_transaction_isolation(conn, opts)
@@ -564,6 +564,7 @@ module Sequel
564
564
  BACKSLASH_RE = /\\/.freeze
565
565
  QUAD_BACKSLASH = "\\\\\\\\".freeze
566
566
  BLOB_START = "0x".freeze
567
+ EMPTY_BLOB = "''".freeze
567
568
  HSTAR = "H*".freeze
568
569
 
569
570
  include Sequel::Dataset::Replace
@@ -873,7 +874,11 @@ module Sequel
873
874
 
874
875
  # MySQL uses a preceding X for hex escaping strings
875
876
  def literal_blob_append(sql, v)
876
- sql << BLOB_START << v.unpack(HSTAR).first
877
+ if v.empty?
878
+ sql << EMPTY_BLOB
879
+ else
880
+ sql << BLOB_START << v.unpack(HSTAR).first
881
+ end
877
882
  end
878
883
 
879
884
  # Use 0 for false on MySQL
@@ -966,6 +966,8 @@ module Sequel
966
966
  case db_type
967
967
  when /\Ainterval\z/io
968
968
  :interval
969
+ when /\Acitext\z/io
970
+ :string
969
971
  else
970
972
  super
971
973
  end
@@ -66,6 +66,8 @@ module Sequel
66
66
  SQL::JoinUsingClause.new(v(o.using), o.join_type, v(o.table), v(o.table_alias))
67
67
  when SQL::JoinClause
68
68
  SQL::JoinClause.new(o.join_type, v(o.table), v(o.table_alias))
69
+ when SQL::DelayedEvaluation
70
+ SQL::DelayedEvaluation.new(lambda{v(o.callable.call)})
69
71
  when SQL::Wrapper
70
72
  SQL::Wrapper.new(v(o.value))
71
73
  else
@@ -0,0 +1,71 @@
1
+ # The error_sql extension adds a DatabaseError#sql method
2
+ # that you can use to get the sql that caused the error
3
+ # to be raised.
4
+ #
5
+ # begin
6
+ # DB.run "Invalid SQL"
7
+ # rescue => e
8
+ # puts e.sql # "Invalid SQL"
9
+ # end
10
+ #
11
+ # On some databases, the error message contains part or all
12
+ # of the SQL used, but on other databases, none of the SQL
13
+ # used is displayed in the error message, so it can be
14
+ # difficult to track down what is causing the error without
15
+ # using a logger. This extension should hopefully make
16
+ # debugging easier on databases that have bad error
17
+ # messages.
18
+ #
19
+ # This extension may not work correctly in the following cases:
20
+ #
21
+ # * log_yield is not used when executing the query.
22
+ # * The underlying exception is frozen or reused.
23
+ # * The underlying exception doesn't correctly record instance
24
+ # variables set on it (seems to happen on JRuby when underlying
25
+ # exception objects are Java exceptions).
26
+ #
27
+ # To load the extension into the database:
28
+ #
29
+ # DB.extension :error_sql
30
+
31
+ module Sequel
32
+ class DatabaseError
33
+ # Get the SQL code that caused this error to be raised.
34
+ def sql
35
+ # We store the error SQL in the wrapped exception instead of the
36
+ # current exception, since when the error SQL is originally associated
37
+ # with the wrapped exception, the current exception doesn't exist. It's
38
+ # possible to copy the error SQL into the current exception, but there
39
+ # doesn't seem to be a reason to do that.
40
+ wrapped_exception.instance_variable_get(:@sequel_error_sql) if wrapped_exception
41
+ end
42
+ end
43
+
44
+ module ErrorSQL
45
+ # Store the SQL related to the exception with the exception, so it
46
+ # is available for DatabaseError#sql later.
47
+ def log_exception(exception, message)
48
+ exception.instance_variable_set(:@sequel_error_sql, message)
49
+ super
50
+ end
51
+
52
+ # If there are no loggers for this database and an exception is raised
53
+ # store the SQL related to the exception with the exception, so it
54
+ # is available for DatabaseError#sql later.
55
+ def log_yield(sql, args=nil)
56
+ if @loggers.empty?
57
+ begin
58
+ yield
59
+ rescue => e
60
+ sql = "#{sql}; #{args.inspect}" if args
61
+ e.instance_variable_set(:@sequel_error_sql, sql)
62
+ raise
63
+ end
64
+ else
65
+ super
66
+ end
67
+ end
68
+ end
69
+
70
+ Database.register_extension(:error_sql, ErrorSQL)
71
+ end
@@ -404,7 +404,6 @@ module Sequel
404
404
  self
405
405
  end
406
406
  end
407
- private_class_method :migrator_class
408
407
 
409
408
  # The column to use to hold the migration version number for integer migrations or
410
409
  # filename for timestamp migrations (defaults to :version for integer migrations and
@@ -28,9 +28,11 @@ module Sequel
28
28
  end
29
29
 
30
30
  # Yields a paginated dataset for each page and returns the receiver. Does
31
- # a count to find the total number of records for this dataset.
31
+ # a count to find the total number of records for this dataset. Returns
32
+ # an enumerator if no block is given.
32
33
  def each_page(page_size)
33
34
  raise(Error, "You cannot paginate a dataset that already has a limit") if @opts[:limit]
35
+ return to_enum(:each_page, page_size) unless block_given?
34
36
  record_count = count
35
37
  total_pages = (record_count / page_size.to_f).ceil
36
38
  (1..total_pages).each{|page_no| yield paginate(page_no, page_size, record_count)}
@@ -47,7 +49,8 @@ module Sequel
47
49
  attr_accessor :page_size
48
50
 
49
51
  # The number of pages in the dataset before pagination, of which
50
- # this paginated dataset is one.
52
+ # this paginated dataset is one. Empty datasets are considered
53
+ # to have a single page.
51
54
  attr_accessor :page_count
52
55
 
53
56
  # The current page of the dataset, starting at 1 and not 0.
@@ -107,6 +110,7 @@ module Sequel
107
110
  @page_size = page_size
108
111
  @pagination_record_count = record_count
109
112
  @page_count = (record_count / page_size.to_f).ceil
113
+ @page_count = 1 if @page_count == 0
110
114
  self
111
115
  end
112
116
  end
@@ -38,8 +38,9 @@
38
38
  #
39
39
  # If you are not using the native postgres adapter and are using array
40
40
  # types as model column values you probably should use the
41
- # pg_typecast_on_load plugin in the model, and set it to typecast the
42
- # array column(s) on load.
41
+ # typecast_on_load plugin if the column values are returned as a
42
+ # regular array, and the pg_typecast_on_load plugin if the column
43
+ # values are returned as a string.
43
44
  #
44
45
  # This extension by default includes handlers for array types for
45
46
  # all scalar types that the native postgres adapter handles. It
@@ -109,6 +110,7 @@ module Sequel
109
110
 
110
111
  ARRAY = "ARRAY".freeze
111
112
  DOUBLE_COLON = '::'.freeze
113
+ EMPTY_ARRAY = "'{}'".freeze
112
114
  EMPTY_BRACKET = '[]'.freeze
113
115
  OPEN_BRACKET = '['.freeze
114
116
  CLOSE_BRACKET = ']'.freeze
@@ -517,9 +519,14 @@ module Sequel
517
519
  # If the receiver has a type, add a cast to the
518
520
  # database array type.
519
521
  def sql_literal_append(ds, sql)
520
- sql << ARRAY
521
- _literal_append(sql, ds, to_a)
522
- if at = array_type
522
+ at = array_type
523
+ if empty? && at
524
+ sql << EMPTY_ARRAY
525
+ else
526
+ sql << ARRAY
527
+ _literal_append(sql, ds, to_a)
528
+ end
529
+ if at
523
530
  sql << DOUBLE_COLON << at.to_s << EMPTY_BRACKET
524
531
  end
525
532
  end
@@ -75,9 +75,11 @@
75
75
  #
76
76
  # DB.extension :pg_hstore
77
77
  #
78
- # If you are not using the native postgres adapter, you probably
79
- # also want to use the pg_typecast_on_load plugin in the model, and
80
- # set it to typecast the hstore column(s) on load.
78
+ # If you are not using the native postgres adapter and are using hstore
79
+ # types as model column values you probably should use the
80
+ # typecast_on_load plugin if the column values are returned as a
81
+ # hash, and the pg_typecast_on_load plugin if the column
82
+ # values are returned as a string.
81
83
  #
82
84
  # This extension requires the delegate and strscan libraries.
83
85
 
@@ -10,9 +10,9 @@
10
10
  #
11
11
  # DB.extension :pg_inet
12
12
  #
13
- # If you are not using the native postgres adapter, you probably
14
- # also want to use the pg_typecast_on_load plugin in the model, and
15
- # set it to typecast the inet/cidr column(s) on load.
13
+ # If you are not using the native postgres adapter and are using inet/cidr
14
+ # types as model column values you probably should use the
15
+ # pg_typecast_on_load plugin if the column values are returned as a string.
16
16
  #
17
17
  # This extension integrates with the pg_array extension. If you plan
18
18
  # to use the inet[] or cidr[] types, load the pg_array extension before
@@ -15,9 +15,9 @@
15
15
  #
16
16
  # DB.extension :pg_interval
17
17
  #
18
- # If you are not using the native postgres adapter, you probably
19
- # also want to use the pg_typecast_on_load plugin in the model, and
20
- # set it to typecast the interval type column(s) on load.
18
+ # If you are not using the native postgres adapter and are using interval
19
+ # types as model column values you probably should use the
20
+ # pg_typecast_on_load plugin if the column values are returned as a string.
21
21
  #
22
22
  # This extension integrates with the pg_array extension. If you plan
23
23
  # to use arrays of interval types, load the pg_array extension before the
@@ -42,9 +42,9 @@
42
42
  #
43
43
  # DB.extension :pg_json
44
44
  #
45
- # If you are not using the native postgres adapter, you probably
46
- # also want to use the pg_typecast_on_load plugin in the model, and
47
- # set it to typecast the json column(s) on load.
45
+ # If you are not using the native postgres adapter and are using json
46
+ # types as model column values you probably should use the
47
+ # pg_typecast_on_load plugin if the column values are returned as a string.
48
48
  #
49
49
  # This extension integrates with the pg_array extension. If you plan
50
50
  # to use the json[] type, load the pg_array extension before the
@@ -45,9 +45,9 @@
45
45
  #
46
46
  # DB.extension :pg_range
47
47
  #
48
- # If you are not using the native postgres adapter, you probably
49
- # also want to use the pg_typecast_on_load plugin in the model, and
50
- # set it to typecast the range type column(s) on load.
48
+ # If you are not using the native postgres adapter and are using range
49
+ # types as model column values you probably should use the
50
+ # pg_typecast_on_load plugin if the column values are returned as a string.
51
51
  #
52
52
  # This extension integrates with the pg_array extension. If you plan
53
53
  # to use arrays of range types, load the pg_array extension before the
@@ -74,9 +74,9 @@
74
74
  # DB.conversion_procs.select{|k,v| v.is_a?(Sequel::Postgres::PGRow::Parser) && \
75
75
  # v.converter && (v.converter.name.nil? || v.converter.name == '') }.map{|k,v| v}
76
76
  #
77
- # If you are not using the native postgres adapter, you probably
78
- # also want to use the pg_typecast_on_load plugin in the model, and
79
- # set it to typecast the composite type column(s) on load.
77
+ # If you are not using the native postgres adapter and are using composite
78
+ # types as model column values you probably should use the
79
+ # pg_typecast_on_load plugin if the column values are returned as a string.
80
80
  #
81
81
  # This extension requires both the strscan and delegate libraries.
82
82
 
@@ -25,9 +25,17 @@
25
25
  # end
26
26
  # DB[:a].all # Uses default
27
27
  #
28
- # Note that this extension assumes the following shard names should use the
29
- # server/shard passed to with_server: :default, nil, :read_only. All other
30
- # shard names will cause the standard behavior to be used.
28
+ # Note that if you pass the nil, :default, or :read_only server/shard
29
+ # names to Dataset#server inside a with_server block, they will be
30
+ # ignored and the server/shard given to with_server will be used:
31
+ #
32
+ # DB.with_server(:shard1) do
33
+ # DB[:a].all # Uses shard1
34
+ # DB[:a].server(:shard2).all # Uses shard2
35
+ # DB[:a].server(nil).all # Uses shard1
36
+ # DB[:a].server(:default).all # Uses shard1
37
+ # DB[:a].server(:read_only).all # Uses shard1
38
+ # end
31
39
 
32
40
  module Sequel
33
41
  module ServerBlock
@@ -102,13 +102,8 @@ module Sequel
102
102
 
103
103
  key = opts[:key] ||= :parent_id
104
104
  prkey = opts[:primary_key] ||= model.primary_key
105
-
106
- parent = opts.merge(opts.fetch(:parent, {})).fetch(:name, :parent)
107
- childrena = opts.merge(opts.fetch(:children, {})).fetch(:name, :children)
108
-
109
105
  ka = opts[:key_alias] ||= :x_root_x
110
106
  t = opts[:cte_name] ||= :t
111
- opts[:reciprocal] = nil
112
107
  c_all = if model.dataset.recursive_cte_requires_column_aliases?
113
108
  # Work around Oracle/ruby-oci8 bug that returns integers as BigDecimals in recursive queries.
114
109
  conv_bd = model.db.database_type == :oracle
@@ -119,13 +114,42 @@ module Sequel
119
114
  [SQL::ColumnAll.new(model.table_name)]
120
115
  end
121
116
 
117
+ bd_conv = lambda{|v| conv_bd && v.is_a?(BigDecimal) ? v.to_i : v}
118
+
119
+ key_array = Array(key)
120
+ prkey_array = Array(prkey)
121
+ if key.is_a?(Array)
122
+ key_conv = lambda{|m| key_array.map{|k| m[k]}}
123
+ key_present = lambda{|m| key_conv[m].all?}
124
+ prkey_conv = lambda{|m| prkey_array.map{|k| m[k]}}
125
+ key_aliases = (0...key_array.length).map{|i| :"#{ka}_#{i}"}
126
+ ka_conv = lambda{|m| key_aliases.map{|k| m[k]}}
127
+ ancestor_base_case_columns = prkey_array.zip(key_aliases).map{|k, ka_| SQL::AliasedExpression.new(k, ka_)} + c_all
128
+ descendant_base_case_columns = key_array.zip(key_aliases).map{|k, ka_| SQL::AliasedExpression.new(k, ka_)} + c_all
129
+ recursive_case_columns = prkey_array.zip(key_aliases).map{|k, ka_| SQL::QualifiedIdentifier.new(t, ka_)} + c_all
130
+ extract_key_alias = lambda{|m| key_aliases.map{|ka_| bd_conv[m.values.delete(ka_)]}}
131
+ else
132
+ key_present = key_conv = lambda{|m| m[key]}
133
+ prkey_conv = lambda{|m| m[prkey]}
134
+ key_aliases = [ka]
135
+ ka_conv = lambda{|m| m[ka]}
136
+ ancestor_base_case_columns = [SQL::AliasedExpression.new(prkey, ka)] + c_all
137
+ descendant_base_case_columns = [SQL::AliasedExpression.new(key, ka)] + c_all
138
+ recursive_case_columns = [SQL::QualifiedIdentifier.new(t, ka)] + c_all
139
+ extract_key_alias = lambda{|m| bd_conv[m.values.delete(ka)]}
140
+ end
141
+
142
+ parent = opts.merge(opts.fetch(:parent, {})).fetch(:name, :parent)
143
+ childrena = opts.merge(opts.fetch(:children, {})).fetch(:name, :children)
144
+
145
+ opts[:reciprocal] = nil
122
146
  a = opts.merge(opts.fetch(:ancestors, {}))
123
147
  ancestors = a.fetch(:name, :ancestors)
124
148
  a[:read_only] = true unless a.has_key?(:read_only)
125
149
  a[:eager_loader_key] = key
126
150
  a[:dataset] ||= proc do
127
- base_ds = model.filter(prkey=>send(key))
128
- recursive_ds = model.join(t, key=>prkey)
151
+ base_ds = model.filter(prkey_array.zip(key_array.map{|k| send(k)}))
152
+ recursive_ds = model.join(t, key_array.zip(prkey_array))
129
153
  if c = a[:conditions]
130
154
  (base_ds, recursive_ds) = [base_ds, recursive_ds].collect do |ds|
131
155
  (c.is_a?(Array) && !Sequel.condition_specifier?(c)) ? ds.filter(*c) : ds.filter(c)
@@ -140,14 +164,14 @@ module Sequel
140
164
  aal = Array(a[:after_load])
141
165
  aal << proc do |m, ancs|
142
166
  unless m.associations.has_key?(parent)
143
- parent_map = {m[prkey]=>m}
167
+ parent_map = {prkey_conv[m]=>m}
144
168
  child_map = {}
145
- child_map[m[key]] = m if m[key]
169
+ child_map[key_conv[m]] = m if key_present[m]
146
170
  m.associations[parent] = nil
147
171
  ancs.each do |obj|
148
172
  obj.associations[parent] = nil
149
- parent_map[obj[prkey]] = obj
150
- if ok = obj[key]
173
+ parent_map[prkey_conv[obj]] = obj
174
+ if ok = key_conv[obj]
151
175
  child_map[ok] = obj
152
176
  end
153
177
  end
@@ -164,16 +188,16 @@ module Sequel
164
188
  parent_map = {}
165
189
  children_map = {}
166
190
  eo[:rows].each do |obj|
167
- parent_map[obj[prkey]] = obj
168
- (children_map[obj[key]] ||= []) << obj
191
+ parent_map[prkey_conv[obj]] = obj
192
+ (children_map[key_conv[obj]] ||= []) << obj
169
193
  obj.associations[ancestors] = []
170
194
  obj.associations[parent] = nil
171
195
  end
172
196
  r = model.association_reflection(ancestors)
173
197
  base_case = model.filter(prkey=>id_map.keys).
174
- select(SQL::AliasedExpression.new(prkey, ka), *c_all)
175
- recursive_case = model.join(t, key=>prkey).
176
- select(SQL::QualifiedIdentifier.new(t, ka), *c_all)
198
+ select(*ancestor_base_case_columns)
199
+ recursive_case = model.join(t, key_array.zip(prkey_array)).
200
+ select(*recursive_case_columns)
177
201
  if c = r[:conditions]
178
202
  (base_case, recursive_case) = [base_case, recursive_case].collect do |ds|
179
203
  (c.is_a?(Array) && !Sequel.condition_specifier?(c)) ? ds.filter(*c) : ds.filter(c)
@@ -184,26 +208,24 @@ module Sequel
184
208
  model.from(SQL::AliasedExpression.new(t, table_alias)).
185
209
  with_recursive(t, base_case,
186
210
  recursive_case,
187
- :args=>(([ka] + col_aliases) if col_aliases)),
211
+ :args=>((key_aliases + col_aliases) if col_aliases)),
188
212
  r.select,
189
213
  eo[:associations], eo)
190
214
  elds = elds.select_append(ka) unless elds.opts[:select] == nil
191
215
  elds.all do |obj|
192
- opk = obj[prkey]
216
+ opk = prkey_conv[obj]
193
217
  if parent_map.has_key?(opk)
194
218
  if idm_obj = parent_map[opk]
195
- idm_obj.values[ka] = obj.values[ka]
219
+ key_aliases.each{|ka_| idm_obj.values[ka_] = obj.values[ka_]}
196
220
  obj = idm_obj
197
221
  end
198
222
  else
199
223
  obj.associations[parent] = nil
200
224
  parent_map[opk] = obj
201
- (children_map[obj[key]] ||= []) << obj
225
+ (children_map[key_conv[obj]] ||= []) << obj
202
226
  end
203
227
 
204
- kv = obj.values.delete(ka)
205
- kv = kv.to_i if conv_bd && kv.is_a?(BigDecimal)
206
- if roots = id_map[kv]
228
+ if roots = id_map[extract_key_alias[obj]]
207
229
  roots.each do |root|
208
230
  root.associations[ancestors] << obj
209
231
  end
@@ -224,8 +246,8 @@ module Sequel
224
246
  d[:read_only] = true unless d.has_key?(:read_only)
225
247
  la = d[:level_alias] ||= :x_level_x
226
248
  d[:dataset] ||= proc do
227
- base_ds = model.filter(key=>send(prkey))
228
- recursive_ds = model.join(t, prkey=>key)
249
+ base_ds = model.filter(key_array.zip(prkey_array.map{|k| send(k)}))
250
+ recursive_ds = model.join(t, prkey_array.zip(key_array))
229
251
  if c = d[:conditions]
230
252
  (base_ds, recursive_ds) = [base_ds, recursive_ds].collect do |ds|
231
253
  (c.is_a?(Array) && !Sequel.condition_specifier?(c)) ? ds.filter(*c) : ds.filter(c)
@@ -240,15 +262,15 @@ module Sequel
240
262
  dal = Array(d[:after_load])
241
263
  dal << proc do |m, descs|
242
264
  unless m.associations.has_key?(childrena)
243
- parent_map = {m[prkey]=>m}
265
+ parent_map = {prkey_conv[m]=>m}
244
266
  children_map = {}
245
267
  m.associations[childrena] = []
246
268
  descs.each do |obj|
247
269
  obj.associations[childrena] = []
248
- if opk = obj[prkey]
270
+ if opk = prkey_conv[obj]
249
271
  parent_map[opk] = obj
250
272
  end
251
- if ok = obj[key]
273
+ if ok = key_conv[obj]
252
274
  (children_map[ok] ||= []) << obj
253
275
  end
254
276
  end
@@ -264,15 +286,15 @@ module Sequel
264
286
  parent_map = {}
265
287
  children_map = {}
266
288
  eo[:rows].each do |obj|
267
- parent_map[obj[prkey]] = obj
289
+ parent_map[prkey_conv[obj]] = obj
268
290
  obj.associations[descendants] = []
269
291
  obj.associations[childrena] = []
270
292
  end
271
293
  r = model.association_reflection(descendants)
272
294
  base_case = model.filter(key=>id_map.keys).
273
- select(SQL::AliasedExpression.new(key, ka), *c_all)
274
- recursive_case = model.join(t, prkey=>key).
275
- select(SQL::QualifiedIdentifier.new(t, ka), *c_all)
295
+ select(*descendant_base_case_columns)
296
+ recursive_case = model.join(t, prkey_array.zip(key_array)).
297
+ select(*recursive_case_columns)
276
298
  if c = r[:conditions]
277
299
  (base_case, recursive_case) = [base_case, recursive_case].collect do |ds|
278
300
  (c.is_a?(Array) && !Sequel.condition_specifier?(c)) ? ds.filter(*c) : ds.filter(c)
@@ -288,7 +310,7 @@ module Sequel
288
310
  table_alias = model.dataset.schema_and_table(model.table_name)[1].to_sym
289
311
  elds = model.eager_loading_dataset(r,
290
312
  model.from(SQL::AliasedExpression.new(t, table_alias)).with_recursive(t, base_case, recursive_case,
291
- :args=>(([ka] + col_aliases + (level ? [la] : [])) if col_aliases)),
313
+ :args=>((key_aliases + col_aliases + (level ? [la] : [])) if col_aliases)),
292
314
  r.select,
293
315
  associations, eo)
294
316
  elds = elds.select_append(ka) unless elds.opts[:select] == nil
@@ -297,10 +319,10 @@ module Sequel
297
319
  no_cache = no_cache_level == obj.values.delete(la)
298
320
  end
299
321
 
300
- opk = obj[prkey]
322
+ opk = prkey_conv[obj]
301
323
  if parent_map.has_key?(opk)
302
324
  if idm_obj = parent_map[opk]
303
- idm_obj.values[ka] = obj.values[ka]
325
+ key_aliases.each{|ka_| idm_obj.values[ka_] = obj.values[ka_]}
304
326
  obj = idm_obj
305
327
  end
306
328
  else
@@ -308,13 +330,11 @@ module Sequel
308
330
  parent_map[opk] = obj
309
331
  end
310
332
 
311
- kv = obj.values.delete(ka)
312
- kv = kv.to_i if conv_bd && kv.is_a?(BigDecimal)
313
- if root = id_map[kv].first
333
+ if root = id_map[extract_key_alias[obj]].first
314
334
  root.associations[descendants] << obj
315
335
  end
316
336
 
317
- (children_map[obj[key]] ||= []) << obj
337
+ (children_map[key_conv[obj]] ||= []) << obj
318
338
  end
319
339
  children_map.each do |parent_id, objs|
320
340
  parent_map[parent_id].associations[childrena] = objs.uniq