sequel 5.72.0 → 5.74.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/CHANGELOG +28 -0
- data/README.rdoc +1 -1
- data/doc/migration.rdoc +14 -0
- data/doc/release_notes/5.73.0.txt +66 -0
- data/doc/release_notes/5.74.0.txt +45 -0
- data/lib/sequel/adapters/ibmdb.rb +1 -1
- data/lib/sequel/adapters/jdbc/sqlanywhere.rb +4 -0
- data/lib/sequel/adapters/jdbc/sqlserver.rb +4 -0
- data/lib/sequel/adapters/jdbc.rb +10 -6
- data/lib/sequel/adapters/shared/db2.rb +12 -0
- data/lib/sequel/adapters/shared/postgres.rb +3 -1
- data/lib/sequel/adapters/shared/sqlite.rb +0 -1
- data/lib/sequel/database/schema_methods.rb +3 -2
- data/lib/sequel/database/transactions.rb +6 -0
- data/lib/sequel/dataset/actions.rb +8 -6
- data/lib/sequel/extensions/migration.rb +14 -5
- data/lib/sequel/extensions/pg_json_ops.rb +52 -0
- data/lib/sequel/model/base.rb +20 -10
- data/lib/sequel/model/exceptions.rb +15 -3
- data/lib/sequel/plugins/column_encryption.rb +26 -5
- data/lib/sequel/plugins/paged_operations.rb +181 -0
- data/lib/sequel/version.rb +1 -1
- metadata +7 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 16c90ef17199e4e48f39edee069981ba0030cf63dc481b136f637b1fe069743e
|
4
|
+
data.tar.gz: 52063f32827a04c33173867207290da294878d898d25b80c37666cf66a54b780
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: d808534a13ae702a884524ceae4adcdb8eff9d46681a62d5b2c0d926b46279743350520248311c41f8a385c7fc66d17cbc44c7f1af52f1cffd356fca498d6bb2
|
7
|
+
data.tar.gz: 309d0f0c0a7c47a4f1acc90107443e7fd2aa51cb09038082354e822711b11986ae271bb784e710336cebafc83b8b5a99adf088bfd19e2353728d25de86f942c4
|
data/CHANGELOG
CHANGED
@@ -1,3 +1,31 @@
|
|
1
|
+
=== 5.74.0 (2023-11-01)
|
2
|
+
|
3
|
+
* Make generated columns show up in Database#schema when using SQLite 3.37+ (jeremyevans) (#2087)
|
4
|
+
|
5
|
+
* Add revert method for Sequel.migration blocks, to revert changes inside the block on up, and apply the changes on down (jeremyevans)
|
6
|
+
|
7
|
+
* Re-add is_json and is_not_json methods to the pg_json_ops extension, as the support was re-added in PostgreSQL 16 (jeremyevans)
|
8
|
+
|
9
|
+
* Avoid infinite loop when handling exceptions with a cause loop in jdbc adapter (jeremyevans)
|
10
|
+
|
11
|
+
=== 5.73.0 (2023-10-01)
|
12
|
+
|
13
|
+
* Handle disconnect errors in ibmdb and jdbc/db2 adapters (jeremyevans) (#2083)
|
14
|
+
|
15
|
+
* Support skipping transactions in Dataset#{import,paged_each} using :skip_transaction option (jeremyevans)
|
16
|
+
|
17
|
+
* Add Database#transaction :skip_transaction option to skip creating a transaction or savepoint (jeremyevans)
|
18
|
+
|
19
|
+
* Stop using a transaction for a single query if calling Dataset#import with a dataset (jeremyevans)
|
20
|
+
|
21
|
+
* Add paged_operations plugin for paged deletes and updates and other custom operations (jeremyevans) (#2080)
|
22
|
+
|
23
|
+
* Support to_tsquery: :websearch option to Dataset#full_text_search on PostgreSQL 11+ (jeremyevans) (#2075)
|
24
|
+
|
25
|
+
* Add MassAssignmentRestriction#model and #column for getting the model instance and related column for mass assignment errors (artofhuman, jeremyevans) (#2079)
|
26
|
+
|
27
|
+
* Stop using base64 library in column_encryption plugin (jeremyevans)
|
28
|
+
|
1
29
|
=== 5.72.0 (2023-09-01)
|
2
30
|
|
3
31
|
* Sort caches before marshalling when using schema_caching, index_caching, static_cache_cache, and pg_auto_constraint_validations (jeremyevans)
|
data/README.rdoc
CHANGED
@@ -927,7 +927,7 @@ Sequel fully supports the currently supported versions of Ruby (MRI) and JRuby.
|
|
927
927
|
support unsupported versions of Ruby or JRuby, but such support may be dropped in any
|
928
928
|
minor version if keeping it becomes a support issue. The minimum Ruby version
|
929
929
|
required to run the current version of Sequel is 1.9.2, and the minimum JRuby version is
|
930
|
-
9.
|
930
|
+
9.2.0.0 (due to the bigdecimal dependency).
|
931
931
|
|
932
932
|
== Maintainer
|
933
933
|
|
data/doc/migration.rdoc
CHANGED
@@ -90,6 +90,20 @@ the following methods:
|
|
90
90
|
|
91
91
|
If you use any other methods, you should create your own +down+ block.
|
92
92
|
|
93
|
+
To revert a migration created with +change+, you can copy the migration to a new file, and
|
94
|
+
replace +change+ with +revert+. For example, if you no longer need the artists table, you
|
95
|
+
can use the following migration. This will drop the artists table when migrating up, and
|
96
|
+
recreate it when migrating down:
|
97
|
+
|
98
|
+
Sequel.migration do
|
99
|
+
revert do
|
100
|
+
create_table(:artists) do
|
101
|
+
primary_key :id
|
102
|
+
String :name, null: false
|
103
|
+
end
|
104
|
+
end
|
105
|
+
end
|
106
|
+
|
93
107
|
In normal usage, when Sequel's migrator runs, it runs the +up+ blocks for all
|
94
108
|
migrations that have not yet been applied. However, you can use the <tt>-M</tt>
|
95
109
|
switch to specify the version to which to migrate, and if it is lower than the
|
@@ -0,0 +1,66 @@
|
|
1
|
+
= New Features
|
2
|
+
|
3
|
+
* A paged_operations plugin has been added, which adds support for
|
4
|
+
paged_datasets, paged_update, and paged_delete dataset methods.
|
5
|
+
This methods are designed to be used on large datasets, to split
|
6
|
+
a large query into separate smaller queries, to avoid locking the
|
7
|
+
related database table for a long period of time.
|
8
|
+
paged_update and paged_delete operate the same as update and delete,
|
9
|
+
returning the number of rows updated or deleted. paged_datasets yields
|
10
|
+
one or more datasets representing subsets of the receiver, with the
|
11
|
+
union of all of those datasets comprising all records in the receiver:
|
12
|
+
|
13
|
+
Album.plugin :paged_operations
|
14
|
+
|
15
|
+
Album.where{name > 'M'}.paged_datasets{|ds| puts ds.sql}
|
16
|
+
# Runs: SELECT id FROM albums WHERE (name <= 'M') ORDER BY id LIMIT 1 OFFSET 1000
|
17
|
+
# Prints: SELECT * FROM albums WHERE ((name <= 'M') AND ("id" < 1002))
|
18
|
+
# Runs: SELECT id FROM albums WHERE ((name <= 'M') AND (id >= 1002)) ORDER BY id LIMIT 1 OFFSET 1000
|
19
|
+
# Prints: SELECT * FROM albums WHERE ((name <= 'M') AND ("id" < 2002) AND (id >= 1002))
|
20
|
+
# ...
|
21
|
+
# Runs: SELECT id FROM albums WHERE ((name <= 'M') AND (id >= 10002)) ORDER BY id LIMIT 1 OFFSET 1000
|
22
|
+
# Prints: SELECT * FROM albums WHERE ((name <= 'M') AND (id >= 10002))
|
23
|
+
|
24
|
+
Album.where{name <= 'M'}.paged_update(:updated_at=>Sequel::CURRENT_TIMESTAMP)
|
25
|
+
# SELECT id FROM albums WHERE (name <= 'M') ORDER BY id LIMIT 1 OFFSET 1000
|
26
|
+
# UPDATE albums SET updated_at = CURRENT_TIMESTAMP WHERE ((name <= 'M') AND ("id" < 1002))
|
27
|
+
# SELECT id FROM albums WHERE ((name <= 'M') AND (id >= 1002)) ORDER BY id LIMIT 1 OFFSET 1000
|
28
|
+
# UPDATE albums SET updated_at = CURRENT_TIMESTAMP WHERE ((name <= 'M') AND ("id" < 2002) AND (id >= 1002))
|
29
|
+
# ...
|
30
|
+
# SELECT id FROM albums WHERE ((name <= 'M') AND (id >= 10002)) ORDER BY id LIMIT 1 OFFSET 1000
|
31
|
+
# UPDATE albums SET updated_at = CURRENT_TIMESTAMP WHERE ((name <= 'M') AND (id >= 10002))
|
32
|
+
|
33
|
+
Album.where{name > 'M'}.paged_delete
|
34
|
+
# SELECT id FROM albums WHERE (name > 'M') ORDER BY id LIMIT 1 OFFSET 1000
|
35
|
+
# DELETE FROM albums WHERE ((name > 'M') AND (id < 1002))
|
36
|
+
# SELECT id FROM albums WHERE (name > 'M') ORDER BY id LIMIT 1 OFFSET 1000
|
37
|
+
# DELETE FROM albums WHERE ((name > 'M') AND (id < 2002))
|
38
|
+
# ...
|
39
|
+
# SELECT id FROM albums WHERE (name > 'M') ORDER BY id LIMIT 1 OFFSET 1000
|
40
|
+
# DELETE FROM albums WHERE (name > 'M')
|
41
|
+
|
42
|
+
* A Dataset#transaction :skip_transaction option is now support to
|
43
|
+
checkout a connection from the pool without opening a transaction. This
|
44
|
+
makes it easier to handle cases where a transaction may or not be used
|
45
|
+
based on configuration/options. Dataset#import and Dataset#paged_each
|
46
|
+
now both support the :skip_transaction option to skip transactions.
|
47
|
+
|
48
|
+
* Dataset#full_text_search now supports the to_tsquery: :websearch option
|
49
|
+
on PostgreSQL 11+, to use the websearch_to_tsquery database function.
|
50
|
+
|
51
|
+
* The Sequel::MassAssignmentRestriction exception now supports model
|
52
|
+
and column methods to get provide additional information about the
|
53
|
+
exception. Additionally, the exception message now includes information
|
54
|
+
about the model class.
|
55
|
+
|
56
|
+
= Other Improvements
|
57
|
+
|
58
|
+
* The ibmdb and jdbc/db2 adapter now both handle disconnect errors
|
59
|
+
correctly, removing the related connection from the pool.
|
60
|
+
|
61
|
+
* Dataset#import no longer uses an explicit transaction if given a dataset
|
62
|
+
value, as in that case, only a single query is used.
|
63
|
+
|
64
|
+
* The column_encryption plugin no longer uses the base64 library. The
|
65
|
+
base64 library is moving from the standard library to a bundled gem
|
66
|
+
in Ruby 3.4, and this avoids having a dependency on it.
|
@@ -0,0 +1,45 @@
|
|
1
|
+
= New Features
|
2
|
+
|
3
|
+
* Sequel.migration blocks now support a revert method, which reverts
|
4
|
+
the changes in the block on up, and applies them on down. So if
|
5
|
+
you have a migration such as:
|
6
|
+
|
7
|
+
Sequel.migration do
|
8
|
+
change do
|
9
|
+
create_table :table do
|
10
|
+
# ...
|
11
|
+
end
|
12
|
+
end
|
13
|
+
end
|
14
|
+
|
15
|
+
and you later want to add a migration that drops the table, you
|
16
|
+
can use:
|
17
|
+
|
18
|
+
Sequel.migration do
|
19
|
+
revert do
|
20
|
+
create_table :table do
|
21
|
+
# ...
|
22
|
+
end
|
23
|
+
end
|
24
|
+
end
|
25
|
+
|
26
|
+
This will drop the table when migrating up, and create a table
|
27
|
+
with the given schema when migrating down.
|
28
|
+
|
29
|
+
* is_json and is_not_json methods have been added to the pg_json_ops
|
30
|
+
extension, for the IS [NOT] JSON operator supported in PostgreSQL
|
31
|
+
16+. These were previously added in Sequel 5.59.0, and removed
|
32
|
+
in Sequel 5.61.0 as support was removed in PostgreSQL 15 beta 4.
|
33
|
+
PostgreSQL 16 shipped with support for them, so support has been
|
34
|
+
recommitted to Sequel.
|
35
|
+
|
36
|
+
= Other Improvements
|
37
|
+
|
38
|
+
* SQLite generated columns now show up in Database#schema when using
|
39
|
+
SQLite 3.37+.
|
40
|
+
|
41
|
+
* Sequel now attempts to avoid an infinite loop in pathlogical cases
|
42
|
+
in the jdbc adapter, where the exception cause chain has a loop.
|
43
|
+
Additionally, if an exception is already recognized as a disconnect,
|
44
|
+
or an exception already responds to a getSQLState method, Sequel no
|
45
|
+
longer looks at the causes of the exception.
|
data/lib/sequel/adapters/jdbc.rb
CHANGED
@@ -396,11 +396,16 @@ module Sequel
|
|
396
396
|
|
397
397
|
def database_exception_sqlstate(exception, opts)
|
398
398
|
if database_exception_use_sqlstates?
|
399
|
-
|
400
|
-
exception = exception.cause
|
401
|
-
return exception.getSQLState if exception.respond_to?(:getSQLState)
|
402
|
-
end
|
399
|
+
_database_exception_sqlstate(exception, opts)
|
403
400
|
end
|
401
|
+
end
|
402
|
+
|
403
|
+
def _database_exception_sqlstate(exception, opts)
|
404
|
+
16.times do
|
405
|
+
return exception.getSQLState if exception.respond_to?(:getSQLState)
|
406
|
+
break unless exception.respond_to?(:cause) && (exception = exception.cause)
|
407
|
+
end
|
408
|
+
|
404
409
|
nil
|
405
410
|
end
|
406
411
|
|
@@ -415,8 +420,7 @@ module Sequel
|
|
415
420
|
|
416
421
|
# Raise a disconnect error if the SQL state of the cause of the exception indicates so.
|
417
422
|
def disconnect_error?(exception, opts)
|
418
|
-
|
419
|
-
super || (cause.respond_to?(:getSQLState) && cause.getSQLState =~ /^08/)
|
423
|
+
super || (_database_exception_sqlstate(exception, opts) =~ /^08/)
|
420
424
|
end
|
421
425
|
|
422
426
|
# Execute the prepared statement. If the provided name is a
|
@@ -215,6 +215,18 @@ module Sequel
|
|
215
215
|
DATABASE_ERROR_REGEXPS
|
216
216
|
end
|
217
217
|
|
218
|
+
DISCONNECT_SQL_STATES = %w'40003 08001 08003'.freeze
|
219
|
+
def disconnect_error?(exception, opts)
|
220
|
+
sqlstate = database_exception_sqlstate(exception, opts)
|
221
|
+
|
222
|
+
case sqlstate
|
223
|
+
when *DISCONNECT_SQL_STATES
|
224
|
+
true
|
225
|
+
else
|
226
|
+
super
|
227
|
+
end
|
228
|
+
end
|
229
|
+
|
218
230
|
# DB2 has issues with quoted identifiers, so
|
219
231
|
# turn off database quoting by default.
|
220
232
|
def quote_identifiers_default
|
@@ -1798,7 +1798,7 @@ module Sequel
|
|
1798
1798
|
# :phrase :: Similar to :plain, but also adding an ILIKE filter to ensure that
|
1799
1799
|
# returned rows also include the exact phrase used.
|
1800
1800
|
# :rank :: Set to true to order by the rank, so that closer matches are returned first.
|
1801
|
-
# :to_tsquery :: Can be set to :plain or :
|
1801
|
+
# :to_tsquery :: Can be set to :plain, :phrase, or :websearch to specify the function to use to
|
1802
1802
|
# convert the terms to a ts_query.
|
1803
1803
|
# :tsquery :: Specifies the terms argument is already a valid SQL expression returning a
|
1804
1804
|
# tsquery, and can be used directly in the query.
|
@@ -1818,6 +1818,8 @@ module Sequel
|
|
1818
1818
|
query_func = case to_tsquery = opts[:to_tsquery]
|
1819
1819
|
when :phrase, :plain
|
1820
1820
|
:"#{to_tsquery}to_tsquery"
|
1821
|
+
when :websearch
|
1822
|
+
:"websearch_to_tsquery"
|
1821
1823
|
else
|
1822
1824
|
(opts[:phrase] || opts[:plain]) ? :plainto_tsquery : :to_tsquery
|
1823
1825
|
end
|
@@ -504,7 +504,6 @@ module Sequel
|
|
504
504
|
# table_xinfo PRAGMA used, remove hidden columns
|
505
505
|
# that are not generated columns
|
506
506
|
if row[:generated] = (row.delete(:hidden) != 0)
|
507
|
-
next unless row[:type].end_with?(' GENERATED ALWAYS')
|
508
507
|
row[:type] = row[:type].sub(' GENERATED ALWAYS', '')
|
509
508
|
end
|
510
509
|
end
|
@@ -712,8 +712,9 @@ module Sequel
|
|
712
712
|
e = options[:ignore_index_errors] || options[:if_not_exists]
|
713
713
|
generator.indexes.each do |index|
|
714
714
|
begin
|
715
|
-
|
716
|
-
|
715
|
+
transaction(:savepoint=>:only, :skip_transaction=>supports_transactional_ddl? == false) do
|
716
|
+
index_sql_list(name, [index]).each{|sql| execute_ddl(sql)}
|
717
|
+
end
|
717
718
|
rescue Error
|
718
719
|
raise unless e
|
719
720
|
end
|
@@ -166,6 +166,8 @@ module Sequel
|
|
166
166
|
# uses :auto_savepoint, you can set this to false to not use a savepoint.
|
167
167
|
# If the value given for this option is :only, it will only create a
|
168
168
|
# savepoint if it is inside a transaction.
|
169
|
+
# :skip_transaction :: If set, do not actually open a transaction or savepoint,
|
170
|
+
# just checkout a connection and yield it.
|
169
171
|
#
|
170
172
|
# PostgreSQL specific options:
|
171
173
|
#
|
@@ -193,6 +195,10 @@ module Sequel
|
|
193
195
|
end
|
194
196
|
else
|
195
197
|
synchronize(opts[:server]) do |conn|
|
198
|
+
if opts[:skip_transaction]
|
199
|
+
return yield(conn)
|
200
|
+
end
|
201
|
+
|
196
202
|
if opts[:savepoint] == :only
|
197
203
|
if supports_savepoints?
|
198
204
|
if _trans(conn)
|
@@ -356,9 +356,11 @@ module Sequel
|
|
356
356
|
# This does not have an effect if +values+ is a Dataset.
|
357
357
|
# :server :: Set the server/shard to use for the transaction and insert
|
358
358
|
# queries.
|
359
|
+
# :skip_transaction :: Do not use a transaction even when using multiple
|
360
|
+
# INSERT queries.
|
359
361
|
# :slice :: Same as :commit_every, :commit_every takes precedence.
|
360
362
|
def import(columns, values, opts=OPTS)
|
361
|
-
return
|
363
|
+
return insert(columns, values) if values.is_a?(Dataset)
|
362
364
|
|
363
365
|
return if values.empty?
|
364
366
|
raise(Error, 'Using Sequel::Dataset#import with an empty column array is not allowed') if columns.empty?
|
@@ -588,6 +590,8 @@ module Sequel
|
|
588
590
|
# if your ORDER BY expressions are not simple columns, if they contain
|
589
591
|
# qualified identifiers that would be ambiguous unqualified, if they contain
|
590
592
|
# any identifiers that are aliased in SELECT, and potentially other cases.
|
593
|
+
# :skip_transaction :: Do not use a transaction. This can be useful if you want to prevent
|
594
|
+
# a lock on the database table, at the expense of consistency.
|
591
595
|
#
|
592
596
|
# Examples:
|
593
597
|
#
|
@@ -1111,11 +1115,9 @@ module Sequel
|
|
1111
1115
|
# are provided. When only a single value or statement is provided, then yield
|
1112
1116
|
# without using a transaction.
|
1113
1117
|
def _import_transaction(values, trans_opts, &block)
|
1114
|
-
|
1115
|
-
|
1116
|
-
|
1117
|
-
yield
|
1118
|
-
end
|
1118
|
+
# OK to mutate trans_opts as it is generated by _import
|
1119
|
+
trans_opts[:skip_transaction] = true if values.length <= 1
|
1120
|
+
@db.transaction(trans_opts, &block)
|
1119
1121
|
end
|
1120
1122
|
|
1121
1123
|
# Internals of +select_hash+ and +select_hash_groups+
|
@@ -159,6 +159,19 @@ module Sequel
|
|
159
159
|
migration.up = block
|
160
160
|
migration.down = MigrationReverser.new.reverse(&block)
|
161
161
|
end
|
162
|
+
|
163
|
+
# Creates a revert migration. This is the same as creating
|
164
|
+
# the same block with +down+, but it also calls the block and attempts
|
165
|
+
# to create a +up+ block that will reverse the changes made by
|
166
|
+
# the block. This is designed to revert the changes in the
|
167
|
+
# provided block.
|
168
|
+
#
|
169
|
+
# There are no guarantees that this will work perfectly
|
170
|
+
# in all cases, but it works for some simple cases.
|
171
|
+
def revert(&block)
|
172
|
+
migration.down = block
|
173
|
+
migration.up = MigrationReverser.new.reverse(&block)
|
174
|
+
end
|
162
175
|
end
|
163
176
|
|
164
177
|
# Handles the reversing of reversible migrations. Basically records
|
@@ -482,11 +495,7 @@ module Sequel
|
|
482
495
|
@use_transactions
|
483
496
|
end
|
484
497
|
|
485
|
-
|
486
|
-
db.transaction(&block)
|
487
|
-
else
|
488
|
-
yield
|
489
|
-
end
|
498
|
+
db.transaction(:skip_transaction=>use_trans == false, &block)
|
490
499
|
end
|
491
500
|
|
492
501
|
# Load the migration file, raising an exception if the file does not define
|
@@ -123,6 +123,15 @@
|
|
123
123
|
# c = Sequel.pg_jsonb_op(:c)
|
124
124
|
# DB[:t].update(c['key1'] => 1.to_json, c['key2'] => "a".to_json)
|
125
125
|
#
|
126
|
+
# On PostgreSQL 16+, the <tt>IS [NOT] JSON</tt> operator is supported:
|
127
|
+
#
|
128
|
+
# j.is_json # j IS JSON
|
129
|
+
# j.is_json(type: :object) # j IS JSON OBJECT
|
130
|
+
# j.is_json(type: :object, unique: true) # j IS JSON OBJECT WITH UNIQUE
|
131
|
+
# j.is_not_json # j IS NOT JSON
|
132
|
+
# j.is_not_json(type: :array) # j IS NOT JSON ARRAY
|
133
|
+
# j.is_not_json(unique: true) # j IS NOT JSON WITH UNIQUE
|
134
|
+
#
|
126
135
|
# If you are also using the pg_json extension, you should load it before
|
127
136
|
# loading this extension. Doing so will allow you to use the #op method on
|
128
137
|
# JSONHash, JSONHarray, JSONBHash, and JSONBArray, allowing you to perform json/jsonb operations
|
@@ -151,6 +160,18 @@ module Sequel
|
|
151
160
|
GET_PATH = ["(".freeze, " #> ".freeze, ")".freeze].freeze
|
152
161
|
GET_PATH_TEXT = ["(".freeze, " #>> ".freeze, ")".freeze].freeze
|
153
162
|
|
163
|
+
IS_JSON = ["(".freeze, " IS JSON".freeze, "".freeze, ")".freeze].freeze
|
164
|
+
IS_NOT_JSON = ["(".freeze, " IS NOT JSON".freeze, "".freeze, ")".freeze].freeze
|
165
|
+
EMPTY_STRING = Sequel::LiteralString.new('').freeze
|
166
|
+
WITH_UNIQUE = Sequel::LiteralString.new(' WITH UNIQUE').freeze
|
167
|
+
IS_JSON_MAP = {
|
168
|
+
nil => EMPTY_STRING,
|
169
|
+
:value => Sequel::LiteralString.new(' VALUE').freeze,
|
170
|
+
:scalar => Sequel::LiteralString.new(' SCALAR').freeze,
|
171
|
+
:object => Sequel::LiteralString.new(' OBJECT').freeze,
|
172
|
+
:array => Sequel::LiteralString.new(' ARRAY').freeze
|
173
|
+
}.freeze
|
174
|
+
|
154
175
|
# Get JSON array element or object field as json. If an array is given,
|
155
176
|
# gets the object at the specified path.
|
156
177
|
#
|
@@ -233,6 +254,30 @@ module Sequel
|
|
233
254
|
end
|
234
255
|
end
|
235
256
|
|
257
|
+
# Return whether the json object can be parsed as JSON.
|
258
|
+
#
|
259
|
+
# Options:
|
260
|
+
# :type :: Check whether the json object can be parsed as a specific type
|
261
|
+
# of JSON (:value, :scalar, :object, :array).
|
262
|
+
# :unique :: Check JSON objects for unique keys.
|
263
|
+
#
|
264
|
+
# json_op.is_json # json IS JSON
|
265
|
+
# json_op.is_json(type: :object) # json IS JSON OBJECT
|
266
|
+
# json_op.is_json(unique: true) # json IS JSON WITH UNIQUE
|
267
|
+
def is_json(opts=OPTS)
|
268
|
+
_is_json(IS_JSON, opts)
|
269
|
+
end
|
270
|
+
|
271
|
+
# Return whether the json object cannot be parsed as JSON. The opposite
|
272
|
+
# of #is_json. See #is_json for options.
|
273
|
+
#
|
274
|
+
# json_op.is_not_json # json IS NOT JSON
|
275
|
+
# json_op.is_not_json(type: :object) # json IS NOT JSON OBJECT
|
276
|
+
# json_op.is_not_json(unique: true) # json IS NOT JSON WITH UNIQUE
|
277
|
+
def is_not_json(opts=OPTS)
|
278
|
+
_is_json(IS_NOT_JSON, opts)
|
279
|
+
end
|
280
|
+
|
236
281
|
# Returns a set of keys AS text in the json object.
|
237
282
|
#
|
238
283
|
# json_op.keys # json_object_keys(json)
|
@@ -286,6 +331,13 @@ module Sequel
|
|
286
331
|
|
287
332
|
private
|
288
333
|
|
334
|
+
# Internals of IS [NOT] JSON support
|
335
|
+
def _is_json(lit_array, opts)
|
336
|
+
raise Error, "invalid is_json :type option: #{opts[:type].inspect}" unless type = IS_JSON_MAP[opts[:type]]
|
337
|
+
unique = opts[:unique] ? WITH_UNIQUE : EMPTY_STRING
|
338
|
+
Sequel::SQL::BooleanExpression.new(:NOOP, Sequel::SQL::PlaceholderLiteralString.new(lit_array, [self, type, unique]))
|
339
|
+
end
|
340
|
+
|
289
341
|
# Return a placeholder literal with the given str and args, wrapped
|
290
342
|
# in an JSONOp or JSONBOp, used by operators that return json or jsonb.
|
291
343
|
def json_op(str, args)
|
data/lib/sequel/model/base.rb
CHANGED
@@ -1945,8 +1945,10 @@ module Sequel
|
|
1945
1945
|
end
|
1946
1946
|
|
1947
1947
|
# If transactions should be used, wrap the yield in a transaction block.
|
1948
|
-
def checked_transaction(opts=OPTS)
|
1949
|
-
|
1948
|
+
def checked_transaction(opts=OPTS, &block)
|
1949
|
+
h = {:server=>this_server}.merge!(opts)
|
1950
|
+
h[:skip_transaction] = true unless use_transaction?(opts)
|
1951
|
+
db.transaction(h, &block)
|
1950
1952
|
end
|
1951
1953
|
|
1952
1954
|
# Change the value of the column to given value, recording the change.
|
@@ -2031,19 +2033,20 @@ module Sequel
|
|
2031
2033
|
meths = setter_methods(type)
|
2032
2034
|
strict = strict_param_setting
|
2033
2035
|
hash.each do |k,v|
|
2036
|
+
k = k.to_s
|
2034
2037
|
m = "#{k}="
|
2035
2038
|
if meths.include?(m)
|
2036
2039
|
set_column_value(m, v)
|
2037
2040
|
elsif strict
|
2038
2041
|
# Avoid using respond_to? or creating symbols from user input
|
2039
2042
|
if public_methods.map(&:to_s).include?(m)
|
2040
|
-
if Array(model.primary_key).map(&:to_s).member?(k
|
2041
|
-
raise MassAssignmentRestriction
|
2043
|
+
if Array(model.primary_key).map(&:to_s).member?(k) && model.restrict_primary_key?
|
2044
|
+
raise MassAssignmentRestriction.create("#{k} is a restricted primary key", self, k)
|
2042
2045
|
else
|
2043
|
-
raise MassAssignmentRestriction
|
2046
|
+
raise MassAssignmentRestriction.create("#{k} is a restricted column", self, k)
|
2044
2047
|
end
|
2045
2048
|
else
|
2046
|
-
raise MassAssignmentRestriction
|
2049
|
+
raise MassAssignmentRestriction.create("method #{m} doesn't exist", self, k)
|
2047
2050
|
end
|
2048
2051
|
end
|
2049
2052
|
end
|
@@ -2147,8 +2150,9 @@ module Sequel
|
|
2147
2150
|
# # DELETE FROM artists WHERE (id = 2)
|
2148
2151
|
# # ...
|
2149
2152
|
def destroy
|
2150
|
-
|
2151
|
-
|
2153
|
+
@db.transaction(:server=>opts[:server], :skip_transaction=>model.use_transactions == false) do
|
2154
|
+
all(&:destroy).length
|
2155
|
+
end
|
2152
2156
|
end
|
2153
2157
|
|
2154
2158
|
# If there is no order already defined on this dataset, order it by
|
@@ -2228,11 +2232,17 @@ module Sequel
|
|
2228
2232
|
|
2229
2233
|
private
|
2230
2234
|
|
2235
|
+
# Return the dataset ordered by the model's primary key. This should not
|
2236
|
+
# be used if the model does not have a primary key.
|
2237
|
+
def _force_primary_key_order
|
2238
|
+
cached_dataset(:_pk_order_ds){order(*model.primary_key)}
|
2239
|
+
end
|
2240
|
+
|
2231
2241
|
# If the dataset is not already ordered, and the model has a primary key,
|
2232
2242
|
# return a clone ordered by the primary key.
|
2233
2243
|
def _primary_key_order
|
2234
|
-
if @opts[:order].nil? && model &&
|
2235
|
-
|
2244
|
+
if @opts[:order].nil? && model && model.primary_key
|
2245
|
+
_force_primary_key_order
|
2236
2246
|
end
|
2237
2247
|
end
|
2238
2248
|
|
@@ -24,11 +24,23 @@ module Sequel
|
|
24
24
|
UndefinedAssociation = Class.new(Error)
|
25
25
|
).name
|
26
26
|
|
27
|
-
(
|
28
27
|
# Raised when a mass assignment method is called in strict mode with either a restricted column
|
29
28
|
# or a column without a setter method.
|
30
|
-
MassAssignmentRestriction
|
31
|
-
|
29
|
+
class MassAssignmentRestriction < Error
|
30
|
+
# The Sequel::Model object related to this exception.
|
31
|
+
attr_reader :model
|
32
|
+
|
33
|
+
# The column related to this exception, as a string.
|
34
|
+
attr_reader :column
|
35
|
+
|
36
|
+
# Create an instance of this class with the model and column set.
|
37
|
+
def self.create(msg, model, column)
|
38
|
+
e = new("#{msg} for class #{model.class.inspect}")
|
39
|
+
e.instance_variable_set(:@model, model)
|
40
|
+
e.instance_variable_set(:@column, column)
|
41
|
+
e
|
42
|
+
end
|
43
|
+
end
|
32
44
|
|
33
45
|
# Exception class raised when +raise_on_save_failure+ is set and validation fails
|
34
46
|
class ValidationFailed < Error
|
@@ -31,7 +31,6 @@ rescue RuntimeError, OpenSSL::Cipher::CipherError
|
|
31
31
|
# :nocov:
|
32
32
|
end
|
33
33
|
|
34
|
-
require 'base64'
|
35
34
|
require 'securerandom'
|
36
35
|
|
37
36
|
module Sequel
|
@@ -375,7 +374,7 @@ module Sequel
|
|
375
374
|
# Decrypt using any supported format and any available key.
|
376
375
|
def decrypt(data)
|
377
376
|
begin
|
378
|
-
data =
|
377
|
+
data = urlsafe_decode64(data)
|
379
378
|
rescue ArgumentError
|
380
379
|
raise Error, "Unable to decode encrypted column: invalid base64"
|
381
380
|
end
|
@@ -448,7 +447,7 @@ module Sequel
|
|
448
447
|
# The prefix string of columns for the given search type and the first configured encryption key.
|
449
448
|
# Used to find values that do not use this prefix in order to perform reencryption.
|
450
449
|
def current_key_prefix(search_type)
|
451
|
-
|
450
|
+
urlsafe_encode64("#{search_type.chr}\0#{@key_id.chr}")
|
452
451
|
end
|
453
452
|
|
454
453
|
# The prefix values to search for the given data (an array of strings), assuming the column uses
|
@@ -472,11 +471,33 @@ module Sequel
|
|
472
471
|
|
473
472
|
private
|
474
473
|
|
474
|
+
if RUBY_VERSION >= '2.4'
|
475
|
+
def decode64(str)
|
476
|
+
str.unpack1("m0")
|
477
|
+
end
|
478
|
+
# :nocov:
|
479
|
+
else
|
480
|
+
def decode64(str)
|
481
|
+
str.unpack("m0")[0]
|
482
|
+
end
|
483
|
+
# :nocov:
|
484
|
+
end
|
485
|
+
|
486
|
+
def urlsafe_encode64(bin)
|
487
|
+
str = [bin].pack("m0")
|
488
|
+
str.tr!("+/", "-_")
|
489
|
+
str
|
490
|
+
end
|
491
|
+
|
492
|
+
def urlsafe_decode64(str)
|
493
|
+
decode64(str.tr("-_", "+/"))
|
494
|
+
end
|
495
|
+
|
475
496
|
# An array of strings, one for each configured encryption key, to find encypted values matching
|
476
497
|
# the given data and search format.
|
477
498
|
def _search_prefixes(data, search_type)
|
478
499
|
@key_map.map do |key_id, (key, _)|
|
479
|
-
|
500
|
+
urlsafe_encode64(_search_prefix(data, search_type, key_id, key))
|
480
501
|
end
|
481
502
|
end
|
482
503
|
|
@@ -509,7 +530,7 @@ module Sequel
|
|
509
530
|
cipher_text << cipher.update(data) if data_size > 0
|
510
531
|
cipher_text << cipher.final
|
511
532
|
|
512
|
-
|
533
|
+
urlsafe_encode64("#{prefix}#{random_data}#{cipher_iv}#{cipher.auth_tag}#{cipher_text}")
|
513
534
|
end
|
514
535
|
end
|
515
536
|
|
@@ -0,0 +1,181 @@
|
|
1
|
+
# frozen-string-literal: true
|
2
|
+
|
3
|
+
module Sequel
|
4
|
+
module Plugins
|
5
|
+
# The paged_operations plugin adds +paged_update+ and
|
6
|
+
# +paged_delete+ dataset methods. These behave similarly to
|
7
|
+
# the default +update+ and +delete+ dataset methods, except
|
8
|
+
# that the update or deletion is done in potentially multiple
|
9
|
+
# queries (by default, affecting 1000 rows per query).
|
10
|
+
# For a large table, this prevents the change from
|
11
|
+
# locking the table for a long period of time.
|
12
|
+
#
|
13
|
+
# Because the point of this is to prevent locking tables for
|
14
|
+
# long periods of time, the separate queries are not contained
|
15
|
+
# in a transaction, which means if a later query fails,
|
16
|
+
# earlier queries will still be committed. You could prevent
|
17
|
+
# this by using a transaction manually, but that defeats the
|
18
|
+
# purpose of using these methods.
|
19
|
+
#
|
20
|
+
# Examples:
|
21
|
+
#
|
22
|
+
# Album.where{name <= 'M'}.paged_update(updated_at: Sequel::CURRENT_TIMESTAMP)
|
23
|
+
# # SELECT id FROM albums WHERE (name <= 'M') ORDER BY id LIMIT 1 OFFSET 1000
|
24
|
+
# # UPDATE albums SET updated_at = CURRENT_TIMESTAMP WHERE ((name <= 'M') AND ("id" < 1002))
|
25
|
+
# # SELECT id FROM albums WHERE ((name <= 'M') AND (id >= 1002)) ORDER BY id LIMIT 1 OFFSET 1000
|
26
|
+
# # UPDATE albums SET updated_at = CURRENT_TIMESTAMP WHERE ((name <= 'M') AND ("id" < 2002) AND (id >= 1002))
|
27
|
+
# # ...
|
28
|
+
# # SELECT id FROM albums WHERE ((name <= 'M') AND (id >= 10002)) ORDER BY id LIMIT 1 OFFSET 1000
|
29
|
+
# # UPDATE albums SET updated_at = CURRENT_TIMESTAMP WHERE ((name <= 'M') AND (id >= 10002))
|
30
|
+
#
|
31
|
+
# Album.where{name > 'M'}.paged_delete
|
32
|
+
# # SELECT id FROM albums WHERE (name > 'M') ORDER BY id LIMIT 1 OFFSET 1000
|
33
|
+
# # DELETE FROM albums WHERE ((name > 'M') AND (id < 1002))
|
34
|
+
# # SELECT id FROM albums WHERE (name > 'M') ORDER BY id LIMIT 1 OFFSET 1000
|
35
|
+
# # DELETE FROM albums WHERE ((name > 'M') AND (id < 2002))
|
36
|
+
# # ...
|
37
|
+
# # SELECT id FROM albums WHERE (name > 'M') ORDER BY id LIMIT 1 OFFSET 1000
|
38
|
+
# # DELETE FROM albums WHERE (name > 'M')
|
39
|
+
#
|
40
|
+
# The plugin also adds a +paged_datasets+ method that will yield
|
41
|
+
# separate datasets limited in size that in total handle all
|
42
|
+
# rows in the receiver:
|
43
|
+
#
|
44
|
+
# Album.where{name > 'M'}.paged_datasets{|ds| puts ds.sql}
|
45
|
+
# # Runs: SELECT id FROM albums WHERE (name <= 'M') ORDER BY id LIMIT 1 OFFSET 1000
|
46
|
+
# # Prints: SELECT * FROM albums WHERE ((name <= 'M') AND ("id" < 1002))
|
47
|
+
# # Runs: SELECT id FROM albums WHERE ((name <= 'M') AND (id >= 1002)) ORDER BY id LIMIT 1 OFFSET 1000
|
48
|
+
# # Prints: SELECT * FROM albums WHERE ((name <= 'M') AND ("id" < 2002) AND (id >= 1002))
|
49
|
+
# # ...
|
50
|
+
# # Runs: SELECT id FROM albums WHERE ((name <= 'M') AND (id >= 10002)) ORDER BY id LIMIT 1 OFFSET 1000
|
51
|
+
# # Prints: SELECT * FROM albums WHERE ((name <= 'M') AND (id >= 10002))
|
52
|
+
#
|
53
|
+
# To set the number of rows per page, pass a :rows_per_page option:
|
54
|
+
#
|
55
|
+
# Album.where{name <= 'M'}.paged_update({x: Sequel[:x] + 1}, rows_per_page: 4)
|
56
|
+
# # SELECT id FROM albums WHERE (name <= 'M') ORDER BY id LIMIT 1 OFFSET 4
|
57
|
+
# # UPDATE albums SET x = x + 1 WHERE ((name <= 'M') AND ("id" < 5))
|
58
|
+
# # SELECT id FROM albums WHERE ((name <= 'M') AND (id >= 5)) ORDER BY id LIMIT 1 OFFSET 4
|
59
|
+
# # UPDATE albums SET x = x + 1 WHERE ((name <= 'M') AND ("id" < 9) AND (id >= 5))
|
60
|
+
# # ...
|
61
|
+
# # SELECT id FROM albums WHERE ((name <= 'M') AND (id >= 12345)) ORDER BY id LIMIT 1 OFFSET 4
|
62
|
+
# # UPDATE albums SET x = x + 1 WHERE ((name <= 'M') AND (id >= 12345))
|
63
|
+
#
|
64
|
+
# You should avoid using +paged_update+ or +paged_datasets+
|
65
|
+
# with updates that modify the primary key, as such usage is
|
66
|
+
# not supported by this plugin.
|
67
|
+
#
|
68
|
+
# This plugin only supports models with scalar primary keys.
|
69
|
+
#
|
70
|
+
# Usage:
|
71
|
+
#
|
72
|
+
# # Make all model subclasses support paged update/delete/datasets
|
73
|
+
# # (called before loading subclasses)
|
74
|
+
# Sequel::Model.plugin :paged_operations
|
75
|
+
#
|
76
|
+
# # Make the Album class support paged update/delete/datasets
|
77
|
+
# Album.plugin :paged_operations
|
78
|
+
module PagedOperations
|
79
|
+
module ClassMethods
  # Define paged_datasets, paged_delete, and paged_update model class
  # methods that delegate to the model's dataset.
  Plugins.def_dataset_methods(self, [:paged_datasets, :paged_delete, :paged_update])
end
|
82
|
+
|
83
|
+
module DatasetMethods
|
84
|
+
# Yield datasets for subsets of the receiver that are limited
# to no more than 1000 rows (you can configure the number of
# rows using +:rows_per_page+).  Returns an Enumerator if no
# block is given.
#
# Options:
# :rows_per_page :: The maximum number of rows in each yielded dataset
#                   (unless concurrent modifications are made to the table).
def paged_datasets(opts=OPTS)
  unless defined?(yield)
    return enum_for(:paged_datasets, opts)
  end

  # Pass this method's own name so that error messages raised by the
  # shared checks reference paged_datasets (previously :paged_update
  # was passed, producing misleading error messages).
  pk = _paged_operations_pk(:paged_datasets)
  base_offset_ds = offset_ds = _paged_operations_offset_ds(opts)
  first = nil

  # Each pass looks one page ahead for the pk boundary, then yields a
  # dataset restricted to the half-open pk interval [first, last).
  while last = offset_ds.get(pk)
    ds = where(pk < last)
    ds = ds.where(pk >= first) if first
    yield ds
    first = last
    offset_ds = base_offset_ds.where(pk >= first)
  end

  # Final dataset covers the remaining (less than one page of) rows.
  ds = self
  ds = ds.where(pk >= first) if first
  yield ds
  nil
end
|
113
|
+
|
114
|
+
# Delete all rows of the dataset using multiple queries, so that
# no more than 1000 rows are deleted at a time (you can configure
# the number of rows using +:rows_per_page+).  Returns the total
# number of rows deleted.
#
# Options:
# :rows_per_page :: The maximum number of rows affected by each DELETE query
#                   (unless concurrent modifications are made to the table).
def paged_delete(opts=OPTS)
  # Emulated offsets cannot express the paged boundary queries on these
  # databases.  Note the checks are ordered so is_2012_or_later? is only
  # called when the database type is actually :mssql.
  emulated_mssql = db.database_type == :mssql && !is_2012_or_later?
  emulated_oracle = db.database_type == :oracle && !supports_fetch_next_rows?
  if emulated_oracle || emulated_mssql
    raise Error, "paged_delete is not supported on MSSQL/Oracle when using emulated offsets"
  end

  pk = _paged_operations_pk(:paged_delete)
  offset_ds = _paged_operations_offset_ds(opts)
  deleted = 0

  # Repeatedly delete every row below the pk boundary found one page ahead.
  while boundary = offset_ds.get(pk)
    deleted += where(pk < boundary).delete
  end

  # Final delete removes whatever remains (less than one page of rows).
  deleted + delete
end
|
133
|
+
|
134
|
+
# Update all rows of the dataset using multiple queries, so that
# no more than 1000 rows are updated at a time (you can configure
# the number of rows using +:rows_per_page+).  All arguments are
# passed to Dataset#update.  Returns the total number of rows
# updated.
#
# Options:
# :rows_per_page :: The maximum number of rows affected by each UPDATE query
#                   (unless concurrent modifications are made to the table).
def paged_update(values, opts=OPTS)
  total = 0
  # Delegate the paging logic to paged_datasets and sum the per-page
  # update counts.
  paged_datasets(opts) do |page_ds|
    total += page_ds.update(values)
  end
  total
end
|
149
|
+
|
150
|
+
private
|
151
|
+
|
152
|
+
# Run some basic checks common to paged_{datasets,delete,update}
# and return the primary key to operate on as a Sequel::Identifier.
# Raises Error for limited/offset datasets, DB2 emulated offsets,
# composite primary keys, and models without a primary key.
def _paged_operations_pk(meth)
  if @opts[:limit] || @opts[:offset]
    raise Error, "cannot use #{meth} if dataset has a limit or offset"
  end
  if db.database_type == :db2 && db.offset_strategy == :emulate
    raise Error, "the paged_operations plugin is not supported on DB2 when using emulated offsets, set the :offset_strategy Database option to 'limit_offset' or 'offset_fetch'"
  end

  pk = model.primary_key
  case pk
  when Symbol
    Sequel.identifier(pk)
  when Array
    raise Error, "cannot use #{meth} on a model with a composite primary key"
  else
    raise Error, "cannot use #{meth} on a model without a primary key"
  end
end
|
169
|
+
|
170
|
+
# The dataset that will be used by paged_{datasets,delete,update}
# to get the upper limit for the next query.  Uses 1000 rows per
# page unless overridden by the :rows_per_page option.
def _paged_operations_offset_ds(opts)
  rows = opts[:rows_per_page]
  if rows && rows < 1
    raise Error, ":rows_per_page option must be at least 1"
  end
  _force_primary_key_order.offset(rows || 1000)
end
|
178
|
+
end
|
179
|
+
end
|
180
|
+
end
|
181
|
+
end
|
data/lib/sequel/version.rb
CHANGED
@@ -6,7 +6,7 @@ module Sequel
|
|
6
6
|
|
7
7
|
# The minor version of Sequel. Bumped for every non-patch level
|
8
8
|
# release, generally around once a month.
|
9
|
-
MINOR =
|
9
|
+
MINOR = 74
|
10
10
|
|
11
11
|
# The tiny version of Sequel. Usually 0, only bumped for bugfix
|
12
12
|
# releases that fix regressions from previous versions.
|
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: sequel
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 5.
|
4
|
+
version: 5.74.0
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Jeremy Evans
|
8
8
|
autorequire:
|
9
9
|
bindir: bin
|
10
10
|
cert_chain: []
|
11
|
-
date: 2023-
|
11
|
+
date: 2023-11-01 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
name: bigdecimal
|
@@ -219,6 +219,8 @@ extra_rdoc_files:
|
|
219
219
|
- doc/release_notes/5.70.0.txt
|
220
220
|
- doc/release_notes/5.71.0.txt
|
221
221
|
- doc/release_notes/5.72.0.txt
|
222
|
+
- doc/release_notes/5.73.0.txt
|
223
|
+
- doc/release_notes/5.74.0.txt
|
222
224
|
- doc/release_notes/5.8.0.txt
|
223
225
|
- doc/release_notes/5.9.0.txt
|
224
226
|
files:
|
@@ -319,6 +321,8 @@ files:
|
|
319
321
|
- doc/release_notes/5.70.0.txt
|
320
322
|
- doc/release_notes/5.71.0.txt
|
321
323
|
- doc/release_notes/5.72.0.txt
|
324
|
+
- doc/release_notes/5.73.0.txt
|
325
|
+
- doc/release_notes/5.74.0.txt
|
322
326
|
- doc/release_notes/5.8.0.txt
|
323
327
|
- doc/release_notes/5.9.0.txt
|
324
328
|
- doc/schema_modification.rdoc
|
@@ -572,6 +576,7 @@ files:
|
|
572
576
|
- lib/sequel/plugins/nested_attributes.rb
|
573
577
|
- lib/sequel/plugins/optimistic_locking.rb
|
574
578
|
- lib/sequel/plugins/optimistic_locking_base.rb
|
579
|
+
- lib/sequel/plugins/paged_operations.rb
|
575
580
|
- lib/sequel/plugins/pg_array_associations.rb
|
576
581
|
- lib/sequel/plugins/pg_auto_constraint_validations.rb
|
577
582
|
- lib/sequel/plugins/pg_row.rb
|