sequel 5.67.0 → 5.74.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG +86 -0
- data/README.rdoc +3 -3
- data/doc/advanced_associations.rdoc +3 -1
- data/doc/mass_assignment.rdoc +1 -1
- data/doc/migration.rdoc +15 -0
- data/doc/opening_databases.rdoc +8 -1
- data/doc/release_notes/5.68.0.txt +61 -0
- data/doc/release_notes/5.69.0.txt +26 -0
- data/doc/release_notes/5.70.0.txt +35 -0
- data/doc/release_notes/5.71.0.txt +21 -0
- data/doc/release_notes/5.72.0.txt +33 -0
- data/doc/release_notes/5.73.0.txt +66 -0
- data/doc/release_notes/5.74.0.txt +45 -0
- data/doc/sharding.rdoc +3 -1
- data/doc/testing.rdoc +1 -1
- data/lib/sequel/adapters/ibmdb.rb +1 -1
- data/lib/sequel/adapters/jdbc/postgresql.rb +3 -0
- data/lib/sequel/adapters/jdbc/sqlanywhere.rb +4 -0
- data/lib/sequel/adapters/jdbc/sqlserver.rb +4 -0
- data/lib/sequel/adapters/jdbc.rb +10 -6
- data/lib/sequel/adapters/mysql.rb +19 -7
- data/lib/sequel/adapters/shared/db2.rb +12 -0
- data/lib/sequel/adapters/shared/postgres.rb +70 -6
- data/lib/sequel/adapters/shared/sqlite.rb +0 -1
- data/lib/sequel/adapters/trilogy.rb +117 -0
- data/lib/sequel/connection_pool/sharded_threaded.rb +11 -10
- data/lib/sequel/connection_pool/sharded_timed_queue.rb +374 -0
- data/lib/sequel/connection_pool/threaded.rb +6 -0
- data/lib/sequel/connection_pool/timed_queue.rb +16 -3
- data/lib/sequel/connection_pool.rb +8 -1
- data/lib/sequel/database/connecting.rb +1 -1
- data/lib/sequel/database/schema_methods.rb +4 -3
- data/lib/sequel/database/transactions.rb +6 -0
- data/lib/sequel/dataset/actions.rb +8 -6
- data/lib/sequel/extensions/async_thread_pool.rb +3 -2
- data/lib/sequel/extensions/connection_expiration.rb +15 -9
- data/lib/sequel/extensions/connection_validator.rb +15 -10
- data/lib/sequel/extensions/index_caching.rb +5 -1
- data/lib/sequel/extensions/migration.rb +18 -5
- data/lib/sequel/extensions/pg_array.rb +9 -1
- data/lib/sequel/extensions/pg_auto_parameterize_in_array.rb +110 -0
- data/lib/sequel/extensions/pg_enum.rb +1 -2
- data/lib/sequel/extensions/pg_extended_date_support.rb +10 -2
- data/lib/sequel/extensions/pg_json_ops.rb +52 -0
- data/lib/sequel/extensions/pg_multirange.rb +1 -1
- data/lib/sequel/extensions/pg_range.rb +1 -1
- data/lib/sequel/extensions/pg_row.rb +2 -6
- data/lib/sequel/extensions/schema_caching.rb +1 -1
- data/lib/sequel/extensions/server_block.rb +2 -1
- data/lib/sequel/model/base.rb +20 -10
- data/lib/sequel/model/dataset_module.rb +3 -0
- data/lib/sequel/model/exceptions.rb +15 -3
- data/lib/sequel/plugins/column_encryption.rb +26 -5
- data/lib/sequel/plugins/constraint_validations.rb +8 -5
- data/lib/sequel/plugins/defaults_setter.rb +16 -0
- data/lib/sequel/plugins/mssql_optimistic_locking.rb +8 -38
- data/lib/sequel/plugins/optimistic_locking.rb +9 -42
- data/lib/sequel/plugins/optimistic_locking_base.rb +55 -0
- data/lib/sequel/plugins/paged_operations.rb +181 -0
- data/lib/sequel/plugins/pg_auto_constraint_validations.rb +8 -2
- data/lib/sequel/plugins/pg_xmin_optimistic_locking.rb +109 -0
- data/lib/sequel/plugins/static_cache.rb +38 -0
- data/lib/sequel/plugins/static_cache_cache.rb +5 -1
- data/lib/sequel/plugins/validation_helpers.rb +8 -1
- data/lib/sequel/plugins/validation_helpers_generic_type_messages.rb +73 -0
- data/lib/sequel/version.rb +1 -1
- metadata +37 -2
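
The headline changes in this range are a new sharded timed-queue connection pool (Ruby 3.2+), a trilogy adapter, a SEQUEL_DEFAULT_CONNECTION_POOL environment variable, and several new plugins (paged_operations, pg_xmin_optimistic_locking, validation_helpers_generic_type_messages). As a rough sketch of opting into the new pool (the connection URL and shard settings below are placeholders, not taken from this diff; the option names come from the pool shown below):

  require 'sequel'

  # Requires Ruby 3.2+; :max_connections and :pool_timeout match the new
  # pool's documented defaults (4 connections per shard, 5 second timeout).
  DB = Sequel.connect('postgres://localhost/app_db',
    pool_class: :sharded_timed_queue,
    max_connections: 4,
    pool_timeout: 5,
    servers: {read_only: {host: 'replica.example.com'}})

  # Route a query to a specific shard.
  DB[:items].server(:read_only).count
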
data/lib/sequel/connection_pool/sharded_timed_queue.rb
@@ -0,0 +1,374 @@
+# frozen-string-literal: true
+
+# :nocov:
+raise LoadError, "Sequel::ShardedTimedQueueConnectionPool is only available on Ruby 3.2+" unless RUBY_VERSION >= '3.2'
+# :nocov:
+
+# A connection pool allowing multi-threaded access to a sharded pool of connections,
+# using a timed queue (only available in Ruby 3.2+).
+class Sequel::ShardedTimedQueueConnectionPool < Sequel::ConnectionPool
+  # The maximum number of connections this pool will create per shard.
+  attr_reader :max_size
+
+  # The following additional options are respected:
+  # :max_connections :: The maximum number of connections the connection pool
+  #                     will open (default 4)
+  # :pool_timeout :: The amount of seconds to wait to acquire a connection
+  #                  before raising a PoolTimeout (default 5)
+  # :servers :: A hash of servers to use. Keys should be symbols. If not
+  #             present, will use a single :default server.
+  # :servers_hash :: The base hash to use for the servers. By default,
+  #                  Sequel uses Hash.new(:default). You can use a hash with a default proc
+  #                  that raises an error if you want to catch all cases where a nonexistent
+  #                  server is used.
+  def initialize(db, opts = OPTS)
+    super
+
+    @max_size = Integer(opts[:max_connections] || 4)
+    raise(Sequel::Error, ':max_connections must be positive') if @max_size < 1
+    @mutex = Mutex.new
+    @timeout = Float(opts[:pool_timeout] || 5)
+
+    @allocated = {}
+    @sizes = {}
+    @queues = {}
+    @servers = opts.fetch(:servers_hash, Hash.new(:default))
+
+    add_servers([:default])
+    add_servers(opts[:servers].keys) if opts[:servers]
+  end
+
+  # Adds new servers to the connection pool. Allows for dynamic expansion of the potential replicas/shards
+  # at runtime. +servers+ argument should be an array of symbols.
+  def add_servers(servers)
+    sync do
+      servers.each do |server|
+        next if @servers.has_key?(server)
+
+        @servers[server] = server
+        @sizes[server] = 0
+        @queues[server] = Queue.new
+        (@allocated[server] = {}).compare_by_identity
+      end
+    end
+    nil
+  end
+
+  # Yield all of the available connections, and the one currently allocated to
+  # this thread (if one is allocated). This will not yield connections currently
+  # allocated to other threads, as it is not safe to operate on them.
+  def all_connections
+    thread = Sequel.current
+    sync{@queues.to_a}.each do |server, queue|
+      if conn = owned_connection(thread, server)
+        yield conn
+      end
+
+      # Use a hash to record all connections already seen. As soon as we
+      # come across a connection we've already seen, we stop the loop.
+      conns = {}
+      conns.compare_by_identity
+      while true
+        conn = nil
+        begin
+          break unless (conn = queue.pop(timeout: 0)) && !conns[conn]
+          conns[conn] = true
+          yield conn
+        ensure
+          queue.push(conn) if conn
+        end
+      end
+    end
+
+    nil
+  end
+
+  # Removes all connections currently in the pool's queue. This method has the effect of
+  # disconnecting from the database, assuming that no connections are currently
+  # being used.
+  #
+  # Once a connection is requested using #hold, the connection pool
+  # creates new connections to the database.
+  #
+  # If the :server option is provided, it should be a symbol or array of symbols,
+  # and then the method will only disconnect connectsion from those specified shards.
+  def disconnect(opts=OPTS)
+    (opts[:server] ? Array(opts[:server]) : sync{@servers.keys}).each do |server|
+      raise Sequel::Error, "invalid server" unless queue = sync{@queues[server]}
+      while conn = queue.pop(timeout: 0)
+        disconnect_pool_connection(conn, server)
+      end
+      fill_queue(server)
+    end
+    nil
+  end
+
+  # Chooses the first available connection for the given server, or if none are
+  # available, creates a new connection. Passes the connection to the supplied
+  # block:
+  #
+  #   pool.hold(:server1) {|conn| conn.execute('DROP TABLE posts')}
+  #
+  # Pool#hold is re-entrant, meaning it can be called recursively in
+  # the same thread without blocking.
+  #
+  # If no connection is immediately available and the pool is already using the maximum
+  # number of connections, Pool#hold will block until a connection
+  # is available or the timeout expires. If the timeout expires before a
+  # connection can be acquired, a Sequel::PoolTimeout is raised.
+  def hold(server=:default)
+    server = pick_server(server)
+    t = Sequel.current
+    if conn = owned_connection(t, server)
+      return yield(conn)
+    end
+
+    begin
+      conn = acquire(t, server)
+      yield conn
+    rescue Sequel::DatabaseDisconnectError, *@error_classes => e
+      if disconnect_error?(e)
+        oconn = conn
+        conn = nil
+        disconnect_pool_connection(oconn, server) if oconn
+        sync{@allocated[server].delete(t)}
+        fill_queue(server)
+      end
+      raise
+    ensure
+      release(t, conn, server) if conn
+    end
+  end
+
+  # The total number of connections in the pool. Using a non-existant server will return nil.
+  def size(server=:default)
+    sync{@sizes[server]}
+  end
+
+  # Remove servers from the connection pool. Similar to disconnecting from all given servers,
+  # except that after it is used, future requests for the servers will use the
+  # :default server instead.
+  #
+  # Note that an error will be raised if there are any connections currently checked
+  # out for the given servers.
+  def remove_servers(servers)
+    conns = []
+    raise(Sequel::Error, "cannot remove default server") if servers.include?(:default)
+
+    sync do
+      servers.each do |server|
+        next unless @servers.has_key?(server)
+
+        queue = @queues[server]
+
+        while conn = queue.pop(timeout: 0)
+          @sizes[server] -= 1
+          conns << conn
+        end
+
+        unless @sizes[server] == 0
+          raise Sequel::Error, "cannot remove server #{server} as it has allocated connections"
+        end
+
+        @servers.delete(server)
+        @sizes.delete(server)
+        @queues.delete(server)
+        @allocated.delete(server)
+      end
+    end
+
+    nil
+  ensure
+    disconnect_connections(conns)
+  end
+
+  # Return an array of symbols for servers in the connection pool.
+  def servers
+    sync{@servers.keys}
+  end
+
+  def pool_type
+    :sharded_timed_queue
+  end
+
+  private
+
+  # Create a new connection, after the pool's current size has already
+  # been updated to account for the new connection. If there is an exception
+  # when creating the connection, decrement the current size.
+  #
+  # This should only be called after can_make_new?. If there is an exception
+  # between when can_make_new? is called and when preallocated_make_new
+  # is called, it has the effect of reducing the maximum size of the
+  # connection pool by 1, since the current size of the pool will show a
+  # higher number than the number of connections allocated or
+  # in the queue.
+  #
+  # Calling code should not have the mutex when calling this.
+  def preallocated_make_new(server)
+    make_new(server)
+  rescue Exception
+    sync{@sizes[server] -= 1}
+    raise
+  end
+
+  # Disconnect all available connections immediately, and schedule currently allocated connections for disconnection
+  # as soon as they are returned to the pool. The calling code should NOT
+  # have the mutex before calling this.
+  def disconnect_connections(conns)
+    conns.each{|conn| disconnect_connection(conn)}
+  end
+
+  # Decrement the current size of the pool for the server when disconnecting connections.
+  #
+  # Calling code should not have the mutex when calling this.
+  def disconnect_pool_connection(conn, server)
+    sync{@sizes[server] -= 1}
+    disconnect_connection(conn)
+  end
+
+  # If there are any threads waiting on the queue, try to create
+  # new connections in a separate thread if the pool is not yet at the
+  # maximum size.
+  #
+  # The reason for this method is to handle cases where acquire
+  # could not retrieve a connection immediately, and the pool
+  # was already at the maximum size. In that case, the acquire will
+  # wait on the queue until the timeout. This method is called
+  # after disconnecting to potentially add new connections to the
+  # pool, so the threads that are currently waiting for connections
+  # do not timeout after the pool is no longer full.
+  def fill_queue(server)
+    queue = sync{@queues[server]}
+    if queue.num_waiting > 0
+      Thread.new do
+        while queue.num_waiting > 0 && (conn = try_make_new(server))
+          queue.push(conn)
+        end
+      end
+    end
+  end
+
+  # Whether the given size is less than the maximum size of the pool.
+  # In that case, the pool's current size is incremented. If this
+  # method returns true, space in the pool for the connection is
+  # preallocated, and preallocated_make_new should be called to
+  # create the connection.
+  #
+  # Calling code should have the mutex when calling this.
+  def can_make_new?(server, current_size)
+    if @max_size > current_size
+      @sizes[server] += 1
+    end
+  end
+
+  # Try to make a new connection if there is space in the pool.
+  # If the pool is already full, look for dead threads/fibers and
+  # disconnect the related connections.
+  #
+  # Calling code should not have the mutex when calling this.
+  def try_make_new(server)
+    return preallocated_make_new(server) if sync{can_make_new?(server, @sizes[server])}
+
+    to_disconnect = nil
+    do_make_new = false
+
+    sync do
+      current_size = @sizes[server]
+      alloc = @allocated[server]
+      alloc.keys.each do |t|
+        unless t.alive?
+          (to_disconnect ||= []) << alloc.delete(t)
+          current_size -= 1
+        end
+      end
+
+      do_make_new = true if can_make_new?(server, current_size)
+    end
+
+    begin
+      preallocated_make_new(server) if do_make_new
+    ensure
+      if to_disconnect
+        to_disconnect.each{|conn| disconnect_pool_connection(conn, server)}
+        fill_queue(server)
+      end
+    end
+  end
+
+  # Assigns a connection to the supplied thread, if one
+  # is available.
+  #
+  # This should return a connection if one is available within the timeout,
+  # or raise PoolTimeout if a connection could not be acquired within the timeout.
+  #
+  # Calling code should not have the mutex when calling this.
+  def acquire(thread, server)
+    queue = sync{@queues[server]}
+    if conn = queue.pop(timeout: 0) || try_make_new(server) || queue.pop(timeout: @timeout)
+      sync{@allocated[server][thread] = conn}
+    else
+      name = db.opts[:name]
+      raise ::Sequel::PoolTimeout, "timeout: #{@timeout}, server: #{server}#{", database name: #{name}" if name}"
+    end
+  end
+
+  # Returns the connection owned by the supplied thread for the given server,
+  # if any. The calling code should NOT already have the mutex before calling this.
+  def owned_connection(thread, server)
+    sync{@allocated[server][thread]}
+  end
+
+  # If the server given is in the hash, return it, otherwise, return the default server.
+  def pick_server(server)
+    sync{@servers[server]}
+  end
+
+  # Create the maximum number of connections immediately. This should not be called
+  # with a true argument unless no code is currently operating on the database.
+  #
+  # Calling code should not have the mutex when calling this.
+  def preconnect(concurrent = false)
+    conn_servers = sync{@servers.keys}.map!{|s| Array.new(@max_size - @sizes[s], s)}.flatten!
+
+    if concurrent
+      conn_servers.map! do |server|
+        queue = sync{@queues[server]}
+        Thread.new do
+          if conn = try_make_new(server)
+            queue.push(conn)
+          end
+        end
+      end.each(&:value)
+    else
+      conn_servers.each do |server|
+        if conn = try_make_new(server)
+          sync{@queues[server]}.push(conn)
+        end
+      end
+    end
+
+    nil
+  end
+
+  # Releases the connection assigned to the supplied thread back to the pool.
+  #
+  # Calling code should not have the mutex when calling this.
+  def release(thread, _, server)
+    checkin_connection(sync{@allocated[server].delete(thread)}, server)
+    nil
+  end
+
+  # Adds a connection to the queue of available connections, returns the connection.
+  def checkin_connection(conn, server)
+    sync{@queues[server]}.push(conn)
+    conn
+  end
+
+  # Yield to the block while inside the mutex.
+  #
+  # Calling code should not have the mutex when calling this.
+  def sync
+    @mutex.synchronize{yield}
+  end
+end
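
The :servers_hash option documented above can be combined with a default proc so that a typo in a shard name raises instead of silently routing to :default. A minimal sketch using the mock adapter (shard names are illustrative; requires Ruby 3.2+):

  require 'sequel'

  # Hash.new(:default) is the pool's default; a raising default proc catches
  # references to shards that were never configured.
  strict_servers = Hash.new{|_, k| raise Sequel::Error, "unknown shard: #{k.inspect}"}

  DB = Sequel.connect('mock://postgres',
    pool_class: :sharded_timed_queue,
    servers: {shard1: {}, shard2: {}},
    servers_hash: strict_servers)

  DB.synchronize(:shard1){|conn| conn} # fine
  DB.synchronize(:shard3){|conn| conn} # raises Sequel::Error via the default proc
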
data/lib/sequel/connection_pool/threaded.rb
@@ -274,6 +274,12 @@ class Sequel::ThreadedConnectionPool < Sequel::ConnectionPool
     end
 
     @waiter.signal
+
+    # Ensure that after signalling the condition, some other thread is given the
+    # opportunity to acquire the mutex.
+    # See <https://github.com/socketry/async/issues/99> for more context.
+    sleep(0)
+
     nil
   end
 
data/lib/sequel/connection_pool/timed_queue.rb
@@ -81,7 +81,7 @@ class Sequel::TimedQueueConnectionPool < Sequel::ConnectionPool
   # connection can be acquired, a Sequel::PoolTimeout is raised.
   def hold(server=nil)
     t = Sequel.current
-    if conn =
+    if conn = owned_connection(t)
       return yield(conn)
     end
 
@@ -223,8 +223,14 @@ class Sequel::TimedQueueConnectionPool < Sequel::ConnectionPool
     end
   end
 
+  # Returns the connection owned by the supplied thread,
+  # if any. The calling code should NOT already have the mutex before calling this.
+  def owned_connection(thread)
+    sync{@allocated[thread]}
+  end
+
   # Create the maximum number of connections immediately. This should not be called
-  # with a true argument
+  # with a true argument unless no code is currently operating on the database.
   #
   # Calling code should not have the mutex when calling this.
   def preconnect(concurrent = false)
@@ -245,7 +251,14 @@ class Sequel::TimedQueueConnectionPool < Sequel::ConnectionPool
   #
   # Calling code should not have the mutex when calling this.
   def release(thread)
-
+    checkin_connection(sync{@allocated.delete(thread)})
+    nil
+  end
+
+  # Adds a connection to the queue of available connections, returns the connection.
+  def checkin_connection(conn)
+    @queue.push(conn)
+    conn
   end
 
   # Yield to the block while inside the mutex.
data/lib/sequel/connection_pool.rb
@@ -32,6 +32,7 @@ class Sequel::ConnectionPool
    :sharded_threaded => :ShardedThreadedConnectionPool,
    :sharded_single => :ShardedSingleConnectionPool,
    :timed_queue => :TimedQueueConnectionPool,
+   :sharded_timed_queue => :ShardedTimedQueueConnectionPool,
  }
  POOL_CLASS_MAP.to_a.each{|k, v| POOL_CLASS_MAP[k.to_s] = v}
  POOL_CLASS_MAP.freeze
@@ -42,7 +43,8 @@ class Sequel::ConnectionPool
    # Return a pool subclass instance based on the given options. If a <tt>:pool_class</tt>
    # option is provided is provided, use that pool class, otherwise
    # use a new instance of an appropriate pool subclass based on the
-   #
+   # +SEQUEL_DEFAULT_CONNECTION_POOL+ environment variable if set, or
+   # the <tt>:single_threaded</tt> and <tt>:servers</tt> options, otherwise.
    def get_pool(db, opts = OPTS)
      connection_pool_class(opts).new(db, opts)
    end
@@ -62,9 +64,14 @@ class Sequel::ConnectionPool
        end
 
        pc
+     elsif pc = ENV['SEQUEL_DEFAULT_CONNECTION_POOL']
+       pc = "sharded_#{pc}" if opts[:servers] && !pc.start_with?('sharded_')
+       connection_pool_class(:pool_class=>pc)
      else
        pc = if opts[:single_threaded]
          opts[:servers] ? :sharded_single : :single
+       #elsif RUBY_VERSION >= '3.2' # SEQUEL6 or maybe earlier
+       #  opts[:servers] ? :sharded_timed_queue : :timed_queue
        else
          opts[:servers] ? :sharded_threaded : :threaded
        end
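
Per the new branch above, the default pool can also be chosen process-wide via the environment, with a sharded_ prefix added automatically when :servers is present. A sketch (the URL is a placeholder; the variable must be set before the Database object is created):

  require 'sequel'

  ENV['SEQUEL_DEFAULT_CONNECTION_POOL'] = 'timed_queue'

  DB = Sequel.connect('postgres://localhost/app_db')
  DB.pool.pool_type # => :timed_queue
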
data/lib/sequel/database/connecting.rb
@@ -8,7 +8,7 @@ module Sequel
    # ---------------------
 
    # Array of supported database adapters
-   ADAPTERS = %w'ado amalgalite ibmdb jdbc mock mysql mysql2 odbc oracle postgres sqlanywhere sqlite tinytds'.map(&:to_sym)
+   ADAPTERS = %w'ado amalgalite ibmdb jdbc mock mysql mysql2 odbc oracle postgres sqlanywhere sqlite tinytds trilogy'.map(&:to_sym)
 
    # The Database subclass for the given adapter scheme.
    # Raises Sequel::AdapterNotFound if the adapter
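
With trilogy added to ADAPTERS, connecting through the new adapter presumably follows Sequel's usual adapter conventions; the exact options it supports live in the new data/lib/sequel/adapters/trilogy.rb, which is not shown in this diff. A hedged sketch with placeholder credentials:

  require 'sequel'

  # Assumes the trilogy gem is installed; host/database/user/password are placeholders.
  DB = Sequel.connect(adapter: :trilogy, host: 'db.example.com',
                      database: 'app_db', user: 'app', password: 'secret')

  DB[:users].count
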
data/lib/sequel/database/schema_methods.rb
@@ -712,8 +712,9 @@ module Sequel
      e = options[:ignore_index_errors] || options[:if_not_exists]
      generator.indexes.each do |index|
        begin
-
-
+          transaction(:savepoint=>:only, :skip_transaction=>supports_transactional_ddl? == false) do
+            index_sql_list(name, [index]).each{|sql| execute_ddl(sql)}
+          end
        rescue Error
          raise unless e
        end
@@ -900,7 +901,7 @@ module Sequel
    #
    # Any other object given is just converted to a string, with "_" converted to " " and upcased.
    def on_delete_clause(action)
-      action.to_s.
+      action.to_s.tr("_", " ").upcase
    end
 
    # Alias of #on_delete_clause, since the two usually behave the same.
data/lib/sequel/database/transactions.rb
@@ -166,6 +166,8 @@ module Sequel
    # uses :auto_savepoint, you can set this to false to not use a savepoint.
    # If the value given for this option is :only, it will only create a
    # savepoint if it is inside a transaction.
+    # :skip_transaction :: If set, do not actually open a transaction or savepoint,
+    #                      just checkout a connection and yield it.
    #
    # PostgreSQL specific options:
    #
@@ -193,6 +195,10 @@ module Sequel
        end
      else
        synchronize(opts[:server]) do |conn|
+          if opts[:skip_transaction]
+            return yield(conn)
+          end
+
          if opts[:savepoint] == :only
            if supports_savepoints?
              if _trans(conn)
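
The new :skip_transaction option checks out a connection and yields it without issuing BEGIN/COMMIT or a savepoint; a small sketch using the mock adapter (table and column are illustrative):

  require 'sequel'

  DB = Sequel.connect('mock://postgres')

  # No transaction or savepoint is opened around the block.
  DB.transaction(skip_transaction: true) do
    DB[:audit_log].insert(message: 'not wrapped in BEGIN/COMMIT')
  end
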
data/lib/sequel/dataset/actions.rb
@@ -356,9 +356,11 @@ module Sequel
    # This does not have an effect if +values+ is a Dataset.
    # :server :: Set the server/shard to use for the transaction and insert
    #            queries.
+    # :skip_transaction :: Do not use a transaction even when using multiple
+    #                      INSERT queries.
    # :slice :: Same as :commit_every, :commit_every takes precedence.
    def import(columns, values, opts=OPTS)
-      return
+      return insert(columns, values) if values.is_a?(Dataset)
 
      return if values.empty?
      raise(Error, 'Using Sequel::Dataset#import with an empty column array is not allowed') if columns.empty?
@@ -588,6 +590,8 @@ module Sequel
    # if your ORDER BY expressions are not simple columns, if they contain
    # qualified identifiers that would be ambiguous unqualified, if they contain
    # any identifiers that are aliased in SELECT, and potentially other cases.
+    # :skip_transaction :: Do not use a transaction. This can be useful if you want to prevent
+    #                      a lock on the database table, at the expense of consistency.
    #
    # Examples:
    #
@@ -1111,11 +1115,9 @@ module Sequel
    # are provided. When only a single value or statement is provided, then yield
    # without using a transaction.
    def _import_transaction(values, trans_opts, &block)
-
-
-
-        yield
-      end
+      # OK to mutate trans_opts as it is generated by _import
+      trans_opts[:skip_transaction] = true if values.length <= 1
+      @db.transaction(trans_opts, &block)
    end
 
    # Internals of +select_hash+ and +select_hash_groups+
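
Dataset#import now funnels everything through Database#transaction, which is what lets the new :skip_transaction option (and the single-statement case) bypass the transaction entirely; a sketch with illustrative data:

  require 'sequel'

  DB = Sequel.connect('mock://postgres')

  rows = [[1, 'a'], [2, 'b'], [3, 'c']]

  # Import in slices of two rows, without wrapping the statements in one transaction.
  DB[:items].import([:id, :name], rows, slice: 2, skip_transaction: true)
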
data/lib/sequel/extensions/async_thread_pool.rb
@@ -338,8 +338,9 @@ module Sequel
    module DatabaseMethods
      def self.extended(db)
        db.instance_exec do
-
-
+          case pool.pool_type
+          when :single, :sharded_single
+            raise Error, "cannot load async_thread_pool extension if using single or sharded_single connection pool"
          end
 
          num_async_threads = opts[:num_async_threads] ? typecast_value_integer(opts[:num_async_threads]) : (Integer(opts[:max_connections] || 4))
data/lib/sequel/extensions/connection_expiration.rb
@@ -15,16 +15,16 @@
 #
 #   DB.pool.connection_expiration_timeout = 3600 # 1 hour
 #
-# Note that this extension
-# and
-#
-# not affected. As the only reason to use the single threaded
+# Note that this extension does not work with the single
+# threaded and sharded single threaded connection pools.
+# As the only reason to use the single threaded
 # pools is for speed, and this extension makes the connection
 # pool slower, there's not much point in modifying this
 # extension to work with the single threaded pools. The
-# threaded pools work fine even in single threaded
-# you are currently using a single threaded pool
-# use this extension, switch to using
+# non-single threaded pools work fine even in single threaded
+# code, so if you are currently using a single threaded pool
+# and want to use this extension, switch to using another
+# pool.
 #
 # Related module: Sequel::ConnectionExpiration
 
@@ -45,6 +45,11 @@ module Sequel
 
    # Initialize the data structures used by this extension.
    def self.extended(pool)
+      case pool.pool_type
+      when :single, :sharded_single
+        raise Error, "cannot load connection_expiration extension if using single or sharded_single connection pool"
+      end
+
      pool.instance_exec do
        sync do
          @connection_expiration_timestamps ||= {}
@@ -79,8 +84,9 @@ module Sequel
          (cet = sync{@connection_expiration_timestamps[conn]}) &&
            Sequel.elapsed_seconds_since(cet[0]) > cet[1]
 
-
-
+          case pool_type
+          when :sharded_threaded, :sharded_timed_queue
+            sync{@allocated[a.last].delete(Sequel.current)}
          else
            sync{@allocated.delete(Sequel.current)}
          end
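
For completeness, the connection_expiration extension touched above is loaded onto the Database and configured through the pool accessor shown in its own comments; a sketch (the one-hour timeout mirrors the example in the file's documentation):

  require 'sequel'

  # Any non-single-threaded pool works; single/sharded_single now raise at load time.
  DB = Sequel.connect('mock://postgres')
  DB.extension(:connection_expiration)
  DB.pool.connection_expiration_timeout = 3600 # 1 hour
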
data/lib/sequel/extensions/connection_validator.rb
@@ -34,16 +34,16 @@
 # web requests to the number to connections in the database
 # connection pool.
 #
-# Note that this extension
-# and
-#
-# not affected. As the only reason to use the single threaded
+# Note that this extension does not work with the single
+# threaded and sharded single threaded connection pools.
+# As the only reason to use the single threaded
 # pools is for speed, and this extension makes the connection
 # pool slower, there's not much point in modifying this
 # extension to work with the single threaded pools. The
-# threaded pools work fine even in single threaded
-# you are currently using a single threaded pool
-# use this extension, switch to using
+# non-single threaded pools work fine even in single threaded
+# code, so if you are currently using a single threaded pool
+# and want to use this extension, switch to using another
+# pool.
 #
 # Related module: Sequel::ConnectionValidator
 
@@ -61,6 +61,11 @@ module Sequel
 
    # Initialize the data structures used by this extension.
    def self.extended(pool)
+      case pool.pool_type
+      when :single, :sharded_single
+        raise Error, "cannot load connection_validator extension if using single or sharded_single connection pool"
+      end
+
      pool.instance_exec do
        sync do
          @connection_timestamps ||= {}
@@ -103,8 +108,9 @@ module Sequel
            Sequel.elapsed_seconds_since(timer) > @connection_validation_timeout &&
            !db.valid_connection?(conn)
 
-
-
+          case pool_type
+          when :sharded_threaded, :sharded_timed_queue
+            sync{@allocated[a.last].delete(Sequel.current)}
          else
            sync{@allocated.delete(Sequel.current)}
          end
@@ -120,4 +126,3 @@ module Sequel
 
  Database.register_extension(:connection_validator){|db| db.pool.extend(ConnectionValidator)}
end
-
data/lib/sequel/extensions/index_caching.rb
@@ -56,7 +56,11 @@ module Sequel
 
    # Dump the index cache to the filename given in Marshal format.
    def dump_index_cache(file)
-
+      indexes = {}
+      @indexes.sort.each do |k, v|
+        indexes[k] = v
+      end
+      File.open(file, 'wb'){|f| f.write(Marshal.dump(indexes))}
      nil
    end
 
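
The index_caching change above sorts the cached hash before dumping so the written file is deterministic across runs; usage is otherwise unchanged, roughly as follows (the cache path is a placeholder):

  require 'sequel'

  DB = Sequel.connect('mock://postgres')
  DB.extension(:index_caching)

  # Populate the in-memory cache, then persist it; keys are now written in sorted order.
  DB.indexes(:items)
  DB.dump_index_cache('tmp/sequel_index_cache.dump')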