sequel 4.33.0 → 4.34.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG +22 -0
  3. data/doc/release_notes/4.34.0.txt +86 -0
  4. data/doc/testing.rdoc +1 -0
  5. data/doc/validations.rdoc +12 -1
  6. data/lib/sequel/adapters/ado.rb +1 -1
  7. data/lib/sequel/adapters/amalgalite.rb +1 -1
  8. data/lib/sequel/adapters/cubrid.rb +1 -1
  9. data/lib/sequel/adapters/do.rb +1 -1
  10. data/lib/sequel/adapters/ibmdb.rb +1 -1
  11. data/lib/sequel/adapters/jdbc.rb +1 -1
  12. data/lib/sequel/adapters/mock.rb +1 -1
  13. data/lib/sequel/adapters/mysql.rb +1 -1
  14. data/lib/sequel/adapters/mysql2.rb +1 -1
  15. data/lib/sequel/adapters/odbc.rb +1 -1
  16. data/lib/sequel/adapters/oracle.rb +1 -1
  17. data/lib/sequel/adapters/postgres.rb +1 -1
  18. data/lib/sequel/adapters/shared/mssql.rb +1 -1
  19. data/lib/sequel/adapters/sqlanywhere.rb +1 -1
  20. data/lib/sequel/adapters/sqlite.rb +1 -1
  21. data/lib/sequel/adapters/swift.rb +1 -1
  22. data/lib/sequel/adapters/tinytds.rb +2 -2
  23. data/lib/sequel/connection_pool.rb +2 -0
  24. data/lib/sequel/connection_pool/sharded_single.rb +1 -1
  25. data/lib/sequel/connection_pool/sharded_threaded.rb +17 -4
  26. data/lib/sequel/connection_pool/single.rb +1 -1
  27. data/lib/sequel/connection_pool/threaded.rb +17 -4
  28. data/lib/sequel/database/misc.rb +5 -1
  29. data/lib/sequel/dataset.rb +4 -0
  30. data/lib/sequel/dataset/actions.rb +28 -15
  31. data/lib/sequel/extensions/columns_introspection.rb +1 -1
  32. data/lib/sequel/extensions/duplicate_columns_handler.rb +87 -0
  33. data/lib/sequel/extensions/migration.rb +9 -7
  34. data/lib/sequel/extensions/pg_range.rb +73 -14
  35. data/lib/sequel/model/base.rb +2 -2
  36. data/lib/sequel/plugins/dataset_associations.rb +21 -1
  37. data/lib/sequel/plugins/prepared_statements_safe.rb +2 -1
  38. data/lib/sequel/plugins/update_or_create.rb +1 -1
  39. data/lib/sequel/plugins/validation_helpers.rb +7 -0
  40. data/lib/sequel/version.rb +1 -1
  41. data/spec/adapters/postgres_spec.rb +14 -0
  42. data/spec/adapters/spec_helper.rb +6 -0
  43. data/spec/core/connection_pool_spec.rb +30 -3
  44. data/spec/core/database_spec.rb +2 -0
  45. data/spec/core/dataset_spec.rb +8 -0
  46. data/spec/extensions/dataset_associations_spec.rb +32 -0
  47. data/spec/extensions/duplicate_columns_handler_spec.rb +110 -0
  48. data/spec/extensions/pg_range_spec.rb +40 -0
  49. data/spec/extensions/prepared_statements_safe_spec.rb +1 -1
  50. data/spec/extensions/validation_helpers_spec.rb +11 -0
  51. data/spec/integration/associations_test.rb +22 -8
  52. data/spec/integration/dataset_test.rb +10 -0
  53. data/spec/integration/eager_loader_test.rb +1 -1
  54. data/spec/integration/plugin_test.rb +3 -3
  55. data/spec/integration/spec_helper.rb +4 -0
  56. metadata +6 -2
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 3187d95f59e9b0abc9502193d8843e11fc1c46f6
-  data.tar.gz: 7437eeaaa3c1a9f2016ef5d0fbe70a13b6f92a54
+  metadata.gz: 2bbb9208954c7c7fb4d531621af12ac2ce850339
+  data.tar.gz: fb5c58d9906bba99dd60543dae4089f71b3b3037
 SHA512:
-  metadata.gz: 7a6239f275f8d99e78fddfbdce4ee36f28cd00b1d79a895b5b1c1f2a6529de35b437905d20bd9a60fb9e8aff6d2dd00b4a89d5bbbeb9f5ea52b897b0cbe72830
-  data.tar.gz: d123e46cfeb494f9ea8ecb4ee62793bb4873b6df240cec31ea7c290fe8e6c0f24f64f0f419729ee1bf7aedf724d69ad86497c248458c6818af70ccf44d65d525
+  metadata.gz: e00cd2f1752cdd25fd45c30479d4f104c10a7e4b6411eba817601b48b367c1a8cda00f84da65486b948497cf27f7429245654e0f77886ff1088a0d1588b39ca7
+  data.tar.gz: 63fce28567f5a354105a9cf816a6be443b73755beed960567011e3b16169cd22a8ae65fc692b00bd4f337fef57cb1db8105ada35aa150634b738675b83dfdeb8
data/CHANGELOG CHANGED
@@ -1,3 +1,25 @@
+=== 4.34.0 (2016-05-01)
+
+* Add support for :dataset_associations_join association option to dataset_associations plugin, for making resulting datasets have appropriate joins (jeremyevans)
+
+* Log server connection was attempted to in PoolTimeout exception messages in sharded connection pool (jeremyevans)
+
+* Log Database :name option in PoolTimeout exception messages (bigkevmcd, jeremyevans) (#1176)
+
+* Add duplicate_columns_handler extension, for raising or warning if a dataset returns multiple columns with the same name (TSMMark, jeremyevans) (#1175)
+
+* Support registering per-Database custom range types in the pg_range extension (steveh) (#1174)
+
+* Support :preconnect=>:concurrently Database option for preconnecting in separate threads (kch, jeremyevans) (#1172)
+
+* Make prepared_statements_safe plugin work correctly with CURRENT_DATE/CURRENT_TIMESTAMP defaults (jeremyevans) (#1168)
+
+* Add validates_operator validation helper (petedmarsh) (#1170)
+
+* Recognize additional unique constraint violation on Microsoft SQL Server (jeremyevans)
+
+* Add :hash option to Dataset#(select|to)_hash(_groups)? methods for choosing object to populate (mwpastore) (#1167)
+
 === 4.33.0 (2016-04-01)
 
 * Handle arbitrary objects passed as arguments to the association method (jeremyevans) (#1166)
data/doc/release_notes/4.34.0.txt ADDED
@@ -0,0 +1,86 @@
+= New Features
+
+* A duplicate_columns_handler extension has been added, for printing a
+  warning or raising an exception if a dataset returns multiple
+  columns with the same name.  You can set this globally for the
+  Database:
+
+    DB.extension :duplicate_columns_handler
+    DB.opts[:on_duplicate_columns] = :warn
+    DB.opts[:on_duplicate_columns] = proc do |columns|
+      columns.include?(:foo) ? :raise : :ignore
+    end
+
+  or for specific datasets:
+
+    ds = DB[:table].extension(:duplicate_columns_handler)
+    ds = ds.on_duplicate_columns(:raise)
+    ds = ds.on_duplicate_columns do |columns|
+      columns.include?(:foo) ? :raise : :ignore
+    end
+
+  This makes it easier to detect when duplicate columns are returned,
+  which in some cases can cause undesired behavior, such as the values
+  for later columns of the same name overwriting values for earlier
+  columns.
+
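For example (table and column names here are hypothetical), a join that
returns two columns named id keeps only the later value in each row hash:

    DB[:albums].join(:artists, :id=>:artist_id).first[:id]
    # => the artists.id value; the albums.id value was overwritten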
+* The Dataset#to_hash, #to_hash_groups, #select_hash, and
+  #select_hash_groups methods now take an options hash as a third
+  argument.  This options hash can now contain a :hash option, which
+  specifies the object in which the resulting values should be
+  placed.  You can use this to have the values inserted into a
+  custom hash, or another object responding to #[] and #[]=.
+
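A minimal sketch of the new :hash option (dataset and column names are
assumed for illustration):

    h = {}
    DB[:albums].to_hash(:id, :name, :hash=>h)
    # populates and returns h (any object responding to #[] and #[]=
    # could be passed instead)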
+* A validates_operator validation has been added to the
+  validation_helpers plugin:
+
+    class Example < Sequel::Model
+      def validate
+        super
+        validates_operator(:>, 3, :column1)
+        validates_operator(:<=, 4, [:column2, :column3])
+      end
+    end
+
+* The pg_range extension now adds a #register_range_type Database
+  method, supporting per-Database custom range types:
+
+    DB.register_range_type('timerange')
+
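A sketch of the effect, assuming a custom timerange type and a matching
table exist in the database: once registered, values of that type are
returned as Sequel::Postgres::PGRange instances:

    DB.register_range_type('timerange')
    DB[:shifts].get(:working_hours) # => Sequel::Postgres::PGRange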
+* The dataset_associations plugin now supports a
+  :dataset_associations_join association option on associations that
+  use joined datasets.  This option will have the datasets returned
+  by the dataset association methods join to the same tables that
+  would be joined when retrieving the associated objects, allowing
+  selected columns, orders, and filters that reference columns in
+  the joined tables to work correctly.
+
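A sketch of the option (model, join table, and column names are all
assumed for illustration):

    Album.plugin :dataset_associations
    Album.many_to_many :artists,
      :order=>Sequel.qualify(:albums_artists, :position),
      :dataset_associations_join=>true
    Album.where{copies_sold > 10000}.artists
    # the returned dataset joins albums_artists, so the order on the
    # join table's position column works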
+* The Database :preconnect option can now be set to :concurrently,
+  which will create the connections in separate threads.  This can
+  significantly speed up preconnection in high-latency environments.
+
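For example (connection URL hypothetical):

    DB = Sequel.connect('postgres://db-server/app',
      :max_connections=>10, :preconnect=>:concurrently)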
+* The Database :name option is now supported, holding an arbitrary
+  name for the database.  Currently, it is only used in PoolTimeout
+  exception messages, but it may be used in other places in the
+  future.
+
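For example (connection URL hypothetical):

    DB = Sequel.connect('postgres://db-server/app', :name=>'primary')
    # PoolTimeout messages will then include "database name: primary"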
+= Other Improvements
+
+* The prepared_statements_safe plugin now works correctly when using
+  CURRENT_DATE and CURRENT_TIMESTAMP default values for columns.
+
+* Sequel now recognizes an additional unique constraint violation on
+  Microsoft SQL Server.
+
+* PoolTimeout exception messages now include the server/shard to which
+  the connection was attempted when using the sharded threaded
+  connection pool.
+
+= Backwards Compatibility
+
+* Users of sequel_pg should upgrade to 1.6.17, as older versions of
+  sequel_pg may not work with Sequel 4.34.0+.
+
+* Any custom extensions that override Dataset#to_hash,
+  #to_hash_groups, #select_hash, and #select_hash_groups need to
+  be modified to add support for accepting the options hash.
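A sketch of the signature such an override now needs to accept
(argument names assumed):

    def to_hash(key_column, value_column = nil, opts = OPTS)
      # accept the options hash and pass it along
      super
    end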
data/doc/testing.rdoc CHANGED
@@ -154,6 +154,7 @@ The SEQUEL_INTEGRATION_URL environment variable specifies the Database connectio
 
 SEQUEL_COLUMNS_INTROSPECTION :: Use the columns_introspection extension when running the specs
 SEQUEL_CONNECTION_VALIDATOR :: Use the connection validator extension when running the specs
+SEQUEL_DUPLICATE_COLUMNS_HANDLER :: Use the duplicate columns handler extension with value given when running the specs
 SEQUEL_ERROR_SQL :: Use the error_sql extension when running the specs
 SEQUEL_NO_AUTO_LITERAL_STRINGS :: Use the no_auto_string_literals extension when running the specs
 SEQUEL_NO_CACHE_ASSOCIATIONS :: Don't cache association metadata when running the specs
data/doc/validations.rdoc CHANGED
@@ -189,7 +189,7 @@ This is similar to +validates_presence+, but only checks for NULL/nil values, al
     def validate
       super
       validates_format /\A\d\d\d-\d-\d{7}-\d-\d\z/, :isbn
-      validates_format /\a[0-9a-zA-Z:' ]+\z/, :name
+      validates_format /\A[0-9a-zA-Z:' ]+\z/, :name
     end
   end
 
@@ -231,6 +231,17 @@ These methods check that the specified attributes can be valid integers or valid
     end
   end
 
+=== +validates_operator+
+
++validates_operator+ checks that a given +operator+ method returns a truthy value when called on the attribute with a specified value for comparison.  Generally, this is used for inequality checks (>, >=, etc.), but any method that can be called on the attribute, accepts an argument, and returns a truthy value may be used.
+
+  class Album < Sequel::Model
+    def validate
+      super
+      validates_operator(:>, 3, :tracks)
+    end
+  end
+
 === +validates_type+
 
 +validates_type+ checks that the specified attributes are instances of the class specified in the first argument. The class can be specified as the class itself, or as a string or symbol with the class name, or as an array of classes.
data/lib/sequel/adapters/ado.rb CHANGED
@@ -135,7 +135,7 @@ module Sequel
       def fetch_rows(sql)
         execute(sql) do |s|
           columns = cols = s.Fields.extend(Enumerable).map{|column| output_identifier(column.Name)}
-          @columns = columns
+          self.columns = columns
           s.getRows.transpose.each do |r|
             row = {}
             cols.each{|c| row[c] = r.shift}
data/lib/sequel/adapters/amalgalite.rb CHANGED
@@ -160,7 +160,7 @@ module Sequel
       # Yield a hash for each row in the dataset.
       def fetch_rows(sql)
         execute(sql) do |stmt|
-          @columns = cols = stmt.result_fields.map{|c| output_identifier(c)}
+          self.columns = cols = stmt.result_fields.map{|c| output_identifier(c)}
           col_count = cols.size
           stmt.each do |result|
             row = {}
data/lib/sequel/adapters/cubrid.rb CHANGED
@@ -127,7 +127,7 @@ module Sequel
         execute(sql) do |stmt|
           begin
             cols = stmt.column_info.map{|c| [output_identifier(c[COLUMN_INFO_NAME]), CUBRID_TYPE_PROCS[c[COLUMN_INFO_TYPE]]]}
-            @columns = cols.map(&:first)
+            self.columns = cols.map(&:first)
             stmt.each do |r|
               row = {}
               cols.zip(r).each{|(k, p), v| row[k] = (v && p) ? p.call(v) : v}
data/lib/sequel/adapters/do.rb CHANGED
@@ -144,7 +144,7 @@ module Sequel
       # with symbol keys.
       def fetch_rows(sql)
         execute(sql) do |reader|
-          cols = @columns = reader.fields.map{|f| output_identifier(f)}
+          cols = self.columns = reader.fields.map{|f| output_identifier(f)}
           while(reader.next!) do
             h = {}
             cols.zip(reader.values).each{|k, v| h[k] = v}
data/lib/sequel/adapters/ibmdb.rb CHANGED
@@ -401,7 +401,7 @@ module Sequel
           columns << [key, cps[type]]
         end
         cols = columns.map{|c| c.at(0)}
-        @columns = cols
+        self.columns = cols
 
         while res = stmt.fetch_array
           row = {}
data/lib/sequel/adapters/jdbc.rb CHANGED
@@ -800,7 +800,7 @@ module Sequel
           i += 1
           cols << [output_identifier(meta.getColumnLabel(i)), i, convert ? type_convertor(map, meta, meta.getColumnType(i), i) : basic_type_convertor(map, meta, meta.getColumnType(i), i)]
         end
-        @columns = cols.map{|c| c.at(0)}
+        self.columns = cols.map{|c| c.at(0)}
 
         while result.next
           row = {}
data/lib/sequel/adapters/mock.rb CHANGED
@@ -365,7 +365,7 @@ module Sequel
         if cs.empty?
           super
         else
-          @columns = cs
+          self.columns = cs
           self
         end
       end
data/lib/sequel/adapters/mysql.rb CHANGED
@@ -305,7 +305,7 @@ module Sequel
           type_proc = f.type == 1 && cast_tinyint_integer?(f) ? cps[2] : cps[f.type]
           [output_identifier(f.name), type_proc, i+=1]
         end
-        @columns = cols.map(&:first)
+        self.columns = cols.map(&:first)
         if opts[:split_multiple_result_sets]
           s = []
           yield_rows(r, cols){|h| s << h}
data/lib/sequel/adapters/mysql2.rb CHANGED
@@ -154,7 +154,7 @@ module Sequel
       # Yield all rows matching this dataset.
       def fetch_rows(sql)
         execute(sql) do |r|
-          @columns = if identifier_output_method
+          self.columns = if identifier_output_method
             r.fields.map!{|c| output_identifier(c.to_s)}
           else
             r.fields
data/lib/sequel/adapters/odbc.rb CHANGED
@@ -98,7 +98,7 @@ module Sequel
         i = -1
         cols = s.columns(true).map{|c| [output_identifier(c.name), c.type, i+=1]}
         columns = cols.map{|c| c.at(0)}
-        @columns = columns
+        self.columns = columns
         if rows = s.fetch_all
           rows.each do |row|
             hash = {}
data/lib/sequel/adapters/oracle.rb CHANGED
@@ -366,7 +366,7 @@ module Sequel
         cols = columns = cursor.get_col_names.map{|c| output_identifier(c)}
         metadata = cursor.column_metadata
         cm = cols.zip(metadata).map{|c, m| [c, cps[m.data_type]]}
-        @columns = columns
+        self.columns = columns
         while r = cursor.fetch
           row = {}
           r.zip(cm).each{|v, (c, cp)| row[c] = ((v && cp) ? cp.call(v) : v)}
data/lib/sequel/adapters/postgres.rb CHANGED
@@ -835,7 +835,7 @@ module Sequel
         res.nfields.times do |fieldnum|
           cols << [fieldnum, procs[res.ftype(fieldnum)], output_identifier(res.fname(fieldnum))]
         end
-        @columns = cols.map{|c| c.at(2)}
+        self.columns = cols.map{|c| c.at(2)}
         cols
       end
 
data/lib/sequel/adapters/shared/mssql.rb CHANGED
@@ -341,7 +341,7 @@ module Sequel
     end
 
     DATABASE_ERROR_REGEXPS = {
-      /Violation of UNIQUE KEY constraint|Violation of PRIMARY KEY constraint.+Cannot insert duplicate key/ => UniqueConstraintViolation,
+      /Violation of UNIQUE KEY constraint|(Violation of PRIMARY KEY constraint.+)?Cannot insert duplicate key/ => UniqueConstraintViolation,
       /conflicted with the (FOREIGN KEY.*|REFERENCE) constraint/ => ForeignKeyConstraintViolation,
       /conflicted with the CHECK constraint/ => CheckConstraintViolation,
       /column does not allow nulls/ => NotNullConstraintViolation,
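A hedged sketch of the practical effect (table name and server message
text are illustrative only):

    begin
      DB[:albums].insert(:name=>'dup')
    rescue Sequel::UniqueConstraintViolation
      # now also raised for messages like "Cannot insert duplicate key
      # row in object 'dbo.albums' with unique index 'albums_name_idx'",
      # which lack the "Violation of PRIMARY KEY constraint" prefix
    end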
data/lib/sequel/adapters/sqlanywhere.rb CHANGED
@@ -159,7 +159,7 @@ module Sequel
           col_infos << [i, output_identifier(name), cp]
         end
 
-        @columns = col_infos.map{|a| a[1]}
+        self.columns = col_infos.map{|a| a[1]}
 
         if rs
           while api.sqlany_fetch_next(rs) == 1
data/lib/sequel/adapters/sqlite.rb CHANGED
@@ -324,7 +324,7 @@ module Sequel
         cps = db.conversion_procs
         type_procs = result.types.map{|t| cps[base_type_name(t)]}
         cols = result.columns.map{|c| i+=1; [output_identifier(c), i, type_procs[i]]}
-        @columns = cols.map(&:first)
+        self.columns = cols.map(&:first)
         result.each do |values|
           row = {}
           cols.each do |name,id,type_proc|
data/lib/sequel/adapters/swift.rb CHANGED
@@ -134,7 +134,7 @@ module Sequel
       def fetch_rows(sql)
         execute(sql) do |res|
           col_map = {}
-          @columns = res.fields.map do |c|
+          self.columns = res.fields.map do |c|
            col_map[c] = output_identifier(c)
           end
           tz = db.timezone if Sequel.application_timezone
data/lib/sequel/adapters/tinytds.rb CHANGED
@@ -225,7 +225,7 @@ module Sequel
         result.each(*args) do |r|
           unless cols
             cols = result.fields.map{|c| [c, output_identifier(c)]}
-            @columns = columns = cols.map(&:last)
+            self.columns = columns = cols.map(&:last)
           end
           h = {}
           cols.each do |s, sym|
@@ -234,7 +234,7 @@ module Sequel
             yield h
           end
         else
-          @columns = columns
+          self.columns = columns
           if db.timezone == :utc
             result.each(:timezone=>:utc){|r| yield r}
           else
data/lib/sequel/connection_pool.rb CHANGED
@@ -79,6 +79,8 @@ class Sequel::ConnectionPool
   # :preconnect :: Automatically create the maximum number of connections, so that they don't
   #                need to be created as needed. This is useful when connecting takes a long time
   #                and you want to avoid possible latency during runtime.
+  #                Set to :concurrently to create the connections in separate threads. Otherwise
+  #                they'll be created sequentially.
   def initialize(db, opts=OPTS)
     @db = db
     @after_connect = opts[:after_connect]
data/lib/sequel/connection_pool/sharded_single.rb CHANGED
@@ -102,7 +102,7 @@ class Sequel::ShardedSingleConnectionPool < Sequel::ConnectionPool
   end
 
   # Make sure there is a valid connection for each server.
-  def preconnect
+  def preconnect(concurrent = nil)
     servers.each{|s| hold(s){}}
   end
 
data/lib/sequel/connection_pool/sharded_threaded.rb CHANGED
@@ -193,7 +193,7 @@ class Sequel::ShardedThreadedConnectionPool < Sequel::ThreadedConnectionPool
         until conn = _acquire(thread, server)
           deadline ||= time + @timeout
           current_time = Time.now
-          raise(::Sequel::PoolTimeout, "timeout: #{@timeout}, elapsed: #{current_time - time}") if current_time > deadline
+          raise_pool_timeout(current_time - time, server) if current_time > deadline
           # :nocov:
           # It's difficult to get to this point, it can only happen if there is a race condition
           # where a connection cannot be acquired even after the thread is signalled by the condition
@@ -215,7 +215,7 @@ class Sequel::ShardedThreadedConnectionPool < Sequel::ThreadedConnectionPool
       sleep_time = @sleep_time
       sleep sleep_time
       until conn = sync{_acquire(thread, server)}
-        raise(::Sequel::PoolTimeout, "timeout: #{@timeout}, elapsed: #{Time.now - time}") if Time.now > timeout
+        raise_pool_timeout(Time.now - time, server) if Time.now > timeout
         sleep sleep_time
       end
     end
@@ -290,8 +290,21 @@ class Sequel::ShardedThreadedConnectionPool < Sequel::ThreadedConnectionPool
   end
 
   # Create the maximum number of connections to each server immediately.
-  def preconnect
-    servers.each{|s| (max_size - size(s)).times{checkin_connection(s, make_new(s))}}
+  def preconnect(concurrent = false)
+    conn_servers = @servers.keys.map{|s| Array.new(max_size - size(s), s)}.flatten
+
+    if concurrent
+      conn_servers.map{|s| Thread.new{[s, make_new(s)]}}.map(&:join).each{|t| checkin_connection(*t.value)}
+    else
+      conn_servers.each{|s| checkin_connection(s, make_new(s))}
+    end
+  end
+
+  # Raise a PoolTimeout error showing the current timeout, the elapsed time, the server
+  # the connection attempt was made to, and the database's name (if any).
+  def raise_pool_timeout(elapsed, server)
+    name = db.opts[:name]
+    raise ::Sequel::PoolTimeout, "timeout: #{@timeout}, elapsed: #{elapsed}, server: #{server}#{", database name: #{name}" if name}"
   end
 
   # Releases the connection assigned to the supplied thread and server. If the
data/lib/sequel/connection_pool/single.rb CHANGED
@@ -44,7 +44,7 @@ class Sequel::SingleConnectionPool < Sequel::ConnectionPool
   private
 
   # Make sure there is a valid connection.
-  def preconnect
+  def preconnect(concurrent = nil)
     hold{}
   end
 
data/lib/sequel/connection_pool/threaded.rb CHANGED
@@ -160,7 +160,7 @@ class Sequel::ThreadedConnectionPool < Sequel::ConnectionPool
       until conn = _acquire(thread)
         deadline ||= time + @timeout
         current_time = Time.now
-        raise(::Sequel::PoolTimeout, "timeout: #{@timeout}, elapsed: #{current_time - time}") if current_time > deadline
+        raise_pool_timeout(current_time - time) if current_time > deadline
         # :nocov:
         # It's difficult to get to this point, it can only happen if there is a race condition
         # where a connection cannot be acquired even after the thread is signalled by the condition
@@ -181,7 +181,7 @@ class Sequel::ThreadedConnectionPool < Sequel::ConnectionPool
       sleep_time = @sleep_time
       sleep sleep_time
       until conn = sync{_acquire(thread)}
-        raise(::Sequel::PoolTimeout, "timeout: #{@timeout}, elapsed: #{Time.now - time}") if Time.now > timeout
+        raise_pool_timeout(Time.now - time) if Time.now > timeout
        sleep sleep_time
       end
     end
@@ -243,8 +243,21 @@ class Sequel::ThreadedConnectionPool < Sequel::ConnectionPool
   end
 
   # Create the maximum number of connections immediately.
-  def preconnect
-    (max_size - size).times{checkin_connection(make_new(nil))}
+  def preconnect(concurrent = false)
+    enum = (max_size - size).times
+
+    if concurrent
+      enum.map{Thread.new{make_new(nil)}}.map(&:join).each{|t| checkin_connection(t.value)}
+    else
+      enum.each{checkin_connection(make_new(nil))}
+    end
+  end
+
+  # Raise a PoolTimeout error showing the current timeout, the elapsed time, and the
+  # database's name (if any).
+  def raise_pool_timeout(elapsed)
+    name = db.opts[:name]
+    raise ::Sequel::PoolTimeout, "timeout: #{@timeout}, elapsed: #{elapsed}#{", database name: #{name}" if name}"
   end
 
   # Releases the connection assigned to the supplied thread back to the pool.