sequel 4.33.0 → 4.34.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG +22 -0
- data/doc/release_notes/4.34.0.txt +86 -0
- data/doc/testing.rdoc +1 -0
- data/doc/validations.rdoc +12 -1
- data/lib/sequel/adapters/ado.rb +1 -1
- data/lib/sequel/adapters/amalgalite.rb +1 -1
- data/lib/sequel/adapters/cubrid.rb +1 -1
- data/lib/sequel/adapters/do.rb +1 -1
- data/lib/sequel/adapters/ibmdb.rb +1 -1
- data/lib/sequel/adapters/jdbc.rb +1 -1
- data/lib/sequel/adapters/mock.rb +1 -1
- data/lib/sequel/adapters/mysql.rb +1 -1
- data/lib/sequel/adapters/mysql2.rb +1 -1
- data/lib/sequel/adapters/odbc.rb +1 -1
- data/lib/sequel/adapters/oracle.rb +1 -1
- data/lib/sequel/adapters/postgres.rb +1 -1
- data/lib/sequel/adapters/shared/mssql.rb +1 -1
- data/lib/sequel/adapters/sqlanywhere.rb +1 -1
- data/lib/sequel/adapters/sqlite.rb +1 -1
- data/lib/sequel/adapters/swift.rb +1 -1
- data/lib/sequel/adapters/tinytds.rb +2 -2
- data/lib/sequel/connection_pool.rb +2 -0
- data/lib/sequel/connection_pool/sharded_single.rb +1 -1
- data/lib/sequel/connection_pool/sharded_threaded.rb +17 -4
- data/lib/sequel/connection_pool/single.rb +1 -1
- data/lib/sequel/connection_pool/threaded.rb +17 -4
- data/lib/sequel/database/misc.rb +5 -1
- data/lib/sequel/dataset.rb +4 -0
- data/lib/sequel/dataset/actions.rb +28 -15
- data/lib/sequel/extensions/columns_introspection.rb +1 -1
- data/lib/sequel/extensions/duplicate_columns_handler.rb +87 -0
- data/lib/sequel/extensions/migration.rb +9 -7
- data/lib/sequel/extensions/pg_range.rb +73 -14
- data/lib/sequel/model/base.rb +2 -2
- data/lib/sequel/plugins/dataset_associations.rb +21 -1
- data/lib/sequel/plugins/prepared_statements_safe.rb +2 -1
- data/lib/sequel/plugins/update_or_create.rb +1 -1
- data/lib/sequel/plugins/validation_helpers.rb +7 -0
- data/lib/sequel/version.rb +1 -1
- data/spec/adapters/postgres_spec.rb +14 -0
- data/spec/adapters/spec_helper.rb +6 -0
- data/spec/core/connection_pool_spec.rb +30 -3
- data/spec/core/database_spec.rb +2 -0
- data/spec/core/dataset_spec.rb +8 -0
- data/spec/extensions/dataset_associations_spec.rb +32 -0
- data/spec/extensions/duplicate_columns_handler_spec.rb +110 -0
- data/spec/extensions/pg_range_spec.rb +40 -0
- data/spec/extensions/prepared_statements_safe_spec.rb +1 -1
- data/spec/extensions/validation_helpers_spec.rb +11 -0
- data/spec/integration/associations_test.rb +22 -8
- data/spec/integration/dataset_test.rb +10 -0
- data/spec/integration/eager_loader_test.rb +1 -1
- data/spec/integration/plugin_test.rb +3 -3
- data/spec/integration/spec_helper.rb +4 -0
- metadata +6 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA1:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 2bbb9208954c7c7fb4d531621af12ac2ce850339
|
4
|
+
data.tar.gz: fb5c58d9906bba99dd60543dae4089f71b3b3037
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: e00cd2f1752cdd25fd45c30479d4f104c10a7e4b6411eba817601b48b367c1a8cda00f84da65486b948497cf27f7429245654e0f77886ff1088a0d1588b39ca7
|
7
|
+
data.tar.gz: 63fce28567f5a354105a9cf816a6be443b73755beed960567011e3b16169cd22a8ae65fc692b00bd4f337fef57cb1db8105ada35aa150634b738675b83dfdeb8
|
data/CHANGELOG
CHANGED
@@ -1,3 +1,25 @@
|
|
1
|
+
=== 4.34.0 (2016-05-01)
|
2
|
+
|
3
|
+
* Add support for :dataset_associations_join association option to dataset_associations plugin, for making resulting datasets have appropriate joins (jeremyevans)
|
4
|
+
|
5
|
+
* Log server connection was attempted to in PoolTimeout exception messages in sharded connection pool (jeremyevans)
|
6
|
+
|
7
|
+
* Log Database :name option in PoolTimeout exception messages (bigkevmcd, jeremyevans) (#1176)
|
8
|
+
|
9
|
+
* Add duplicate_columns_handler extension, for raising or warning if a dataset returns multiple columns with the same name (TSMMark, jeremyevans) (#1175)
|
10
|
+
|
11
|
+
* Support registering per-Database custom range types in the pg_range extension (steveh) (#1174)
|
12
|
+
|
13
|
+
* Support :preconnect=>:concurrently Database option for preconnecting in separate threads (kch, jeremyevans) (#1172)
|
14
|
+
|
15
|
+
* Make prepared_statements_safe plugin work correctly with CURRENT_DATE/CURRENT_TIMESTAMP defaults (jeremyevans) (#1168)
|
16
|
+
|
17
|
+
* Add validates_operator validation helper (petedmarsh) (#1170)
|
18
|
+
|
19
|
+
* Recognize additional unique constraint violation on Microsoft SQL Server (jeremyevans)
|
20
|
+
|
21
|
+
* Add :hash option to Dataset#(select|to)_hash(_groups)? methods for choosing object to populate (mwpastore) (#1167)
|
22
|
+
|
1
23
|
=== 4.33.0 (2016-04-01)
|
2
24
|
|
3
25
|
* Handle arbitrary objects passed as arguments to the association method (jeremyevans) (#1166)
|
@@ -0,0 +1,86 @@
|
|
1
|
+
= New Features
|
2
|
+
|
3
|
+
* A duplicate_columns_handler extension has been added, for printing a
|
4
|
+
warning or raising an exception if a dataset returns multiple
|
5
|
+
columns with the same name. You can set this globally for the
|
6
|
+
Database:
|
7
|
+
|
8
|
+
DB.extension :duplicate_columns_handler
|
9
|
+
DB.opts[:on_duplicate_columns] = :warn
|
10
|
+
DB.opts[:on_duplicate_columns] = proc do |columns|
|
11
|
+
columns.include?(:foo) ? :raise : :ignore
|
12
|
+
end
|
13
|
+
|
14
|
+
or for specific datasets:
|
15
|
+
|
16
|
+
ds = DB[:table].extension(:duplicate_columns_handler)
|
17
|
+
ds = ds.on_duplicate_columns(:raise)
|
18
|
+
ds = ds.on_duplicate_columns do |columns|
|
19
|
+
columns.include?(:foo) ? :raise : :ignore
|
20
|
+
end
|
21
|
+
|
22
|
+
This makes it easier to detect when duplicate columns are returned,
|
23
|
+
which in some cases can cause undesired behavior, such as the values
|
24
|
+
for later columns of the same name overwriting values for earlier
|
25
|
+
columns.
|
26
|
+
|
27
|
+
* The Dataset#to_hash, #to_hash_groups, #select_hash, and
|
28
|
+
#select_hash_groups methods now take an options hash as a third
|
29
|
+
argument. This options hash can now contain a :hash option, which
|
30
|
+
specifies the object in which the resulting values should be
|
31
|
+
placed. You can use this to have the values inserted into a
|
32
|
+
custom hash, or another object responding to #[] and #[]=.
|
33
|
+
|
34
|
+
* A validates_operator validation has been added to the
|
35
|
+
validation_helpers plugin:
|
36
|
+
|
37
|
+
class Example < Sequel::Model
|
38
|
+
def validate
|
39
|
+
super
|
40
|
+
validates_operator(:>, 3, :column1)
|
41
|
+
validates_operator(:<=, 4, [:column2, :column3])
|
42
|
+
end
|
43
|
+
end
|
44
|
+
|
45
|
+
* The pg_range extension now adds a #register_range_type Database
|
46
|
+
method, supporting per-Database custom range types:
|
47
|
+
|
48
|
+
DB.register_range_type('timerange')
|
49
|
+
|
50
|
+
* The dataset_associations plugin now supports a
|
51
|
+
:dataset_associations_join association option on associations that
|
52
|
+
use joined datasets. This option will have the datasets returned
|
53
|
+
by the dataset association methods join to the same tables that
|
54
|
+
would be joined when retrieving the associated objects, allowing
|
55
|
+
selected columns, orders, and filters that reference columns in
|
56
|
+
the joined tables to work correctly.
|
57
|
+
|
58
|
+
* The Database :preconnect option can now be set to :concurrently,
|
59
|
+
which will create the connections in separate threads. This can
|
60
|
+
significantly speed up preconnection in high-latency environments.
|
61
|
+
|
62
|
+
* The Database :name option is now supported, holding an arbitrary
|
63
|
+
name for the database. Currently, it is only used in PoolTimeout
|
64
|
+
exception messages, but it may be used in other places in the
|
65
|
+
future.
|
66
|
+
|
67
|
+
= Other Improvements
|
68
|
+
|
69
|
+
* The prepared_statements_safe plugin now works correctly when using
|
70
|
+
CURRENT_DATE and CURRENT_TIMESTAMP default values for columns.
|
71
|
+
|
72
|
+
* Sequel now recognizes an additional unique constraint violation on
|
73
|
+
Microsoft SQL Server.
|
74
|
+
|
75
|
+
* PoolTimeout exception messages now include the server/shard to which
|
76
|
+
the connection was attempted when using the sharded threaded
|
77
|
+
connection pool.
|
78
|
+
|
79
|
+
= Backwards Compatibility
|
80
|
+
|
81
|
+
* Users of sequel_pg should upgrade to 1.6.17, as older versions of
|
82
|
+
sequel_pg may not work with Sequel 4.34.0+.
|
83
|
+
|
84
|
+
* Any custom extensions that override Dataset#to_hash,
|
85
|
+
#to_hash_groups, #select_hash, and #select_hash_groups need to
|
86
|
+
be modified to add support for accepting the options hash.
|
data/doc/testing.rdoc
CHANGED
@@ -154,6 +154,7 @@ The SEQUEL_INTEGRATION_URL environment variable specifies the Database connectio
|
|
154
154
|
|
155
155
|
SEQUEL_COLUMNS_INTROSPECTION :: Use the columns_introspection extension when running the specs
|
156
156
|
SEQUEL_CONNECTION_VALIDATOR :: Use the connection validator extension when running the specs
|
157
|
+
SEQUEL_DUPLICATE_COLUMNS_HANDLER :: Use the duplicate columns handler extension with value given when running the specs
|
157
158
|
SEQUEL_ERROR_SQL :: Use the error_sql extension when running the specs
|
158
159
|
SEQUEL_NO_AUTO_LITERAL_STRINGS :: Use the no_auto_string_literals extension when running the specs
|
159
160
|
SEQUEL_NO_CACHE_ASSOCIATIONS :: Don't cache association metadata when running the specs
|
data/doc/validations.rdoc
CHANGED
@@ -189,7 +189,7 @@ This is similar to +validates_presence+, but only checks for NULL/nil values, al
|
|
189
189
|
def validate
|
190
190
|
super
|
191
191
|
validates_format /\A\d\d\d-\d-\d{7}-\d-\d\z/, :isbn
|
192
|
-
validates_format /\
|
192
|
+
validates_format /\A[0-9a-zA-Z:' ]+\z/, :name
|
193
193
|
end
|
194
194
|
end
|
195
195
|
|
@@ -231,6 +231,17 @@ These methods check that the specified attributes can be valid integers or valid
|
|
231
231
|
end
|
232
232
|
end
|
233
233
|
|
234
|
+
=== +validates_operator+
|
235
|
+
|
236
|
+
+validates_operator+ checks that a given +operator+ method returns a truthy value when called on the attribute with a specified value for comparison. Generally, this is used for inequality checks (>, >=, etc) but any method that can be called on the attribute that accepts an argument and returns a truthy value may be used.
|
237
|
+
|
238
|
+
class Album < Sequel::Model
|
239
|
+
def validate
|
240
|
+
super
|
241
|
+
validates_operator(:>, 3, :tracks)
|
242
|
+
end
|
243
|
+
end
|
244
|
+
|
234
245
|
=== +validates_type+
|
235
246
|
|
236
247
|
+validates_type+ checks that the specified attributes are instances of the class specified in the first argument. The class can be specified as the class itself, or as a string or symbol with the class name, or as a an array of classes.
|
data/lib/sequel/adapters/ado.rb
CHANGED
@@ -135,7 +135,7 @@ module Sequel
|
|
135
135
|
def fetch_rows(sql)
|
136
136
|
execute(sql) do |s|
|
137
137
|
columns = cols = s.Fields.extend(Enumerable).map{|column| output_identifier(column.Name)}
|
138
|
-
|
138
|
+
self.columns = columns
|
139
139
|
s.getRows.transpose.each do |r|
|
140
140
|
row = {}
|
141
141
|
cols.each{|c| row[c] = r.shift}
|
@@ -160,7 +160,7 @@ module Sequel
|
|
160
160
|
# Yield a hash for each row in the dataset.
|
161
161
|
def fetch_rows(sql)
|
162
162
|
execute(sql) do |stmt|
|
163
|
-
|
163
|
+
self.columns = cols = stmt.result_fields.map{|c| output_identifier(c)}
|
164
164
|
col_count = cols.size
|
165
165
|
stmt.each do |result|
|
166
166
|
row = {}
|
@@ -127,7 +127,7 @@ module Sequel
|
|
127
127
|
execute(sql) do |stmt|
|
128
128
|
begin
|
129
129
|
cols = stmt.column_info.map{|c| [output_identifier(c[COLUMN_INFO_NAME]), CUBRID_TYPE_PROCS[c[COLUMN_INFO_TYPE]]]}
|
130
|
-
|
130
|
+
self.columns = cols.map(&:first)
|
131
131
|
stmt.each do |r|
|
132
132
|
row = {}
|
133
133
|
cols.zip(r).each{|(k, p), v| row[k] = (v && p) ? p.call(v) : v}
|
data/lib/sequel/adapters/do.rb
CHANGED
@@ -144,7 +144,7 @@ module Sequel
|
|
144
144
|
# with symbol keys.
|
145
145
|
def fetch_rows(sql)
|
146
146
|
execute(sql) do |reader|
|
147
|
-
cols =
|
147
|
+
cols = self.columns = reader.fields.map{|f| output_identifier(f)}
|
148
148
|
while(reader.next!) do
|
149
149
|
h = {}
|
150
150
|
cols.zip(reader.values).each{|k, v| h[k] = v}
|
data/lib/sequel/adapters/jdbc.rb
CHANGED
@@ -800,7 +800,7 @@ module Sequel
|
|
800
800
|
i += 1
|
801
801
|
cols << [output_identifier(meta.getColumnLabel(i)), i, convert ? type_convertor(map, meta, meta.getColumnType(i), i) : basic_type_convertor(map, meta, meta.getColumnType(i), i)]
|
802
802
|
end
|
803
|
-
|
803
|
+
self.columns = cols.map{|c| c.at(0)}
|
804
804
|
|
805
805
|
while result.next
|
806
806
|
row = {}
|
data/lib/sequel/adapters/mock.rb
CHANGED
@@ -305,7 +305,7 @@ module Sequel
|
|
305
305
|
type_proc = f.type == 1 && cast_tinyint_integer?(f) ? cps[2] : cps[f.type]
|
306
306
|
[output_identifier(f.name), type_proc, i+=1]
|
307
307
|
end
|
308
|
-
|
308
|
+
self.columns = cols.map(&:first)
|
309
309
|
if opts[:split_multiple_result_sets]
|
310
310
|
s = []
|
311
311
|
yield_rows(r, cols){|h| s << h}
|
@@ -154,7 +154,7 @@ module Sequel
|
|
154
154
|
# Yield all rows matching this dataset.
|
155
155
|
def fetch_rows(sql)
|
156
156
|
execute(sql) do |r|
|
157
|
-
|
157
|
+
self.columns = if identifier_output_method
|
158
158
|
r.fields.map!{|c| output_identifier(c.to_s)}
|
159
159
|
else
|
160
160
|
r.fields
|
data/lib/sequel/adapters/odbc.rb
CHANGED
@@ -366,7 +366,7 @@ module Sequel
|
|
366
366
|
cols = columns = cursor.get_col_names.map{|c| output_identifier(c)}
|
367
367
|
metadata = cursor.column_metadata
|
368
368
|
cm = cols.zip(metadata).map{|c, m| [c, cps[m.data_type]]}
|
369
|
-
|
369
|
+
self.columns = columns
|
370
370
|
while r = cursor.fetch
|
371
371
|
row = {}
|
372
372
|
r.zip(cm).each{|v, (c, cp)| row[c] = ((v && cp) ? cp.call(v) : v)}
|
@@ -341,7 +341,7 @@ module Sequel
|
|
341
341
|
end
|
342
342
|
|
343
343
|
DATABASE_ERROR_REGEXPS = {
|
344
|
-
/Violation of UNIQUE KEY constraint|Violation of PRIMARY KEY constraint.+Cannot insert duplicate key/ => UniqueConstraintViolation,
|
344
|
+
/Violation of UNIQUE KEY constraint|(Violation of PRIMARY KEY constraint.+)?Cannot insert duplicate key/ => UniqueConstraintViolation,
|
345
345
|
/conflicted with the (FOREIGN KEY.*|REFERENCE) constraint/ => ForeignKeyConstraintViolation,
|
346
346
|
/conflicted with the CHECK constraint/ => CheckConstraintViolation,
|
347
347
|
/column does not allow nulls/ => NotNullConstraintViolation,
|
@@ -324,7 +324,7 @@ module Sequel
|
|
324
324
|
cps = db.conversion_procs
|
325
325
|
type_procs = result.types.map{|t| cps[base_type_name(t)]}
|
326
326
|
cols = result.columns.map{|c| i+=1; [output_identifier(c), i, type_procs[i]]}
|
327
|
-
|
327
|
+
self.columns = cols.map(&:first)
|
328
328
|
result.each do |values|
|
329
329
|
row = {}
|
330
330
|
cols.each do |name,id,type_proc|
|
@@ -134,7 +134,7 @@ module Sequel
|
|
134
134
|
def fetch_rows(sql)
|
135
135
|
execute(sql) do |res|
|
136
136
|
col_map = {}
|
137
|
-
|
137
|
+
self.columns = res.fields.map do |c|
|
138
138
|
col_map[c] = output_identifier(c)
|
139
139
|
end
|
140
140
|
tz = db.timezone if Sequel.application_timezone
|
@@ -225,7 +225,7 @@ module Sequel
|
|
225
225
|
result.each(*args) do |r|
|
226
226
|
unless cols
|
227
227
|
cols = result.fields.map{|c| [c, output_identifier(c)]}
|
228
|
-
|
228
|
+
self.columns = columns = cols.map(&:last)
|
229
229
|
end
|
230
230
|
h = {}
|
231
231
|
cols.each do |s, sym|
|
@@ -234,7 +234,7 @@ module Sequel
|
|
234
234
|
yield h
|
235
235
|
end
|
236
236
|
else
|
237
|
-
|
237
|
+
self.columns = columns
|
238
238
|
if db.timezone == :utc
|
239
239
|
result.each(:timezone=>:utc){|r| yield r}
|
240
240
|
else
|
@@ -79,6 +79,8 @@ class Sequel::ConnectionPool
|
|
79
79
|
# :preconnect :: Automatically create the maximum number of connections, so that they don't
|
80
80
|
# need to be created as needed. This is useful when connecting takes a long time
|
81
81
|
# and you want to avoid possible latency during runtime.
|
82
|
+
# Set to :concurrently to create the connections in separate threads. Otherwise
|
83
|
+
# they'll be created sequentially.
|
82
84
|
def initialize(db, opts=OPTS)
|
83
85
|
@db = db
|
84
86
|
@after_connect = opts[:after_connect]
|
@@ -193,7 +193,7 @@ class Sequel::ShardedThreadedConnectionPool < Sequel::ThreadedConnectionPool
|
|
193
193
|
until conn = _acquire(thread, server)
|
194
194
|
deadline ||= time + @timeout
|
195
195
|
current_time = Time.now
|
196
|
-
|
196
|
+
raise_pool_timeout(current_time - time, server) if current_time > deadline
|
197
197
|
# :nocov:
|
198
198
|
# It's difficult to get to this point, it can only happen if there is a race condition
|
199
199
|
# where a connection cannot be acquired even after the thread is signalled by the condition
|
@@ -215,7 +215,7 @@ class Sequel::ShardedThreadedConnectionPool < Sequel::ThreadedConnectionPool
|
|
215
215
|
sleep_time = @sleep_time
|
216
216
|
sleep sleep_time
|
217
217
|
until conn = sync{_acquire(thread, server)}
|
218
|
-
|
218
|
+
raise_pool_timeout(Time.now - time, server) if Time.now > timeout
|
219
219
|
sleep sleep_time
|
220
220
|
end
|
221
221
|
end
|
@@ -290,8 +290,21 @@ class Sequel::ShardedThreadedConnectionPool < Sequel::ThreadedConnectionPool
|
|
290
290
|
end
|
291
291
|
|
292
292
|
# Create the maximum number of connections to each server immediately.
|
293
|
-
def preconnect
|
294
|
-
servers.
|
293
|
+
def preconnect(concurrent = false)
|
294
|
+
conn_servers = @servers.keys.map{|s| Array.new(max_size - size(s), s)}.flatten
|
295
|
+
|
296
|
+
if concurrent
|
297
|
+
conn_servers.map{|s| Thread.new{[s, make_new(s)]}}.map(&:join).each{|t| checkin_connection(*t.value)}
|
298
|
+
else
|
299
|
+
conn_servers.each{|s| checkin_connection(s, make_new(s))}
|
300
|
+
end
|
301
|
+
end
|
302
|
+
|
303
|
+
# Raise a PoolTimeout error showing the current timeout, the elapsed time, the server
|
304
|
+
# the connection attempt was made to, and the database's name (if any).
|
305
|
+
def raise_pool_timeout(elapsed, server)
|
306
|
+
name = db.opts[:name]
|
307
|
+
raise ::Sequel::PoolTimeout, "timeout: #{@timeout}, elapsed: #{elapsed}, server: #{server}#{", database name: #{name}" if name}"
|
295
308
|
end
|
296
309
|
|
297
310
|
# Releases the connection assigned to the supplied thread and server. If the
|
@@ -160,7 +160,7 @@ class Sequel::ThreadedConnectionPool < Sequel::ConnectionPool
|
|
160
160
|
until conn = _acquire(thread)
|
161
161
|
deadline ||= time + @timeout
|
162
162
|
current_time = Time.now
|
163
|
-
|
163
|
+
raise_pool_timeout(current_time - time) if current_time > deadline
|
164
164
|
# :nocov:
|
165
165
|
# It's difficult to get to this point, it can only happen if there is a race condition
|
166
166
|
# where a connection cannot be acquired even after the thread is signalled by the condition
|
@@ -181,7 +181,7 @@ class Sequel::ThreadedConnectionPool < Sequel::ConnectionPool
|
|
181
181
|
sleep_time = @sleep_time
|
182
182
|
sleep sleep_time
|
183
183
|
until conn = sync{_acquire(thread)}
|
184
|
-
|
184
|
+
raise_pool_timeout(Time.now - time) if Time.now > timeout
|
185
185
|
sleep sleep_time
|
186
186
|
end
|
187
187
|
end
|
@@ -243,8 +243,21 @@ class Sequel::ThreadedConnectionPool < Sequel::ConnectionPool
|
|
243
243
|
end
|
244
244
|
|
245
245
|
# Create the maximum number of connections immediately.
|
246
|
-
def preconnect
|
247
|
-
(max_size - size).times
|
246
|
+
def preconnect(concurrent = false)
|
247
|
+
enum = (max_size - size).times
|
248
|
+
|
249
|
+
if concurrent
|
250
|
+
enum.map{Thread.new{make_new(nil)}}.map(&:join).each{|t| checkin_connection(t.value)}
|
251
|
+
else
|
252
|
+
enum.each{checkin_connection(make_new(nil))}
|
253
|
+
end
|
254
|
+
end
|
255
|
+
|
256
|
+
# Raise a PoolTimeout error showing the current timeout, the elapsed time, and the
|
257
|
+
# database's name (if any).
|
258
|
+
def raise_pool_timeout(elapsed)
|
259
|
+
name = db.opts[:name]
|
260
|
+
raise ::Sequel::PoolTimeout, "timeout: #{@timeout}, elapsed: #{elapsed}#{", database name: #{name}" if name}"
|
248
261
|
end
|
249
262
|
|
250
263
|
# Releases the connection assigned to the supplied thread back to the pool.
|