pg_ha_migrations 1.8.0 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/ci.yml +5 -7
- data/.ruby-version +1 -1
- data/Appraisals +6 -6
- data/Gemfile +0 -1
- data/README.md +108 -50
- data/Rakefile +2 -0
- data/bin/setup +3 -1
- data/gemfiles/rails_7.1.gemfile +1 -1
- data/gemfiles/{rails_6.1.gemfile → rails_7.2.gemfile} +1 -1
- data/gemfiles/{rails_7.0.gemfile → rails_8.0.gemfile} +1 -1
- data/lib/pg_ha_migrations/allowed_versions.rb +1 -1
- data/lib/pg_ha_migrations/constraint.rb +1 -0
- data/lib/pg_ha_migrations/lock_mode.rb +12 -0
- data/lib/pg_ha_migrations/relation.rb +76 -7
- data/lib/pg_ha_migrations/safe_statements.rb +175 -115
- data/lib/pg_ha_migrations/unsafe_statements.rb +149 -31
- data/lib/pg_ha_migrations/version.rb +1 -1
- data/lib/pg_ha_migrations.rb +2 -1
- data/pg_ha_migrations.gemspec +3 -3
- metadata +14 -16
data/lib/pg_ha_migrations/safe_statements.rb
@@ -3,12 +3,12 @@ module PgHaMigrations::SafeStatements
     @safe_added_columns_without_default_value ||= []
   end
 
-  def safe_create_table(table, options={}, &block)
+  def safe_create_table(table, **options, &block)
     if options[:force]
       raise PgHaMigrations::UnsafeMigrationError.new(":force is NOT SAFE! Explicitly call unsafe_drop_table first if you want to recreate an existing table")
     end
 
-    unsafe_create_table(table, options, &block)
+    unsafe_create_table(table, **options, &block)
   end
 
   def safe_create_enum_type(name, values=nil)
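In 2.0 the `safe_*` schema helpers accept Ruby keyword arguments (`**options`) rather than a positional options hash, so callers that previously built a hash and passed it positionally now need to splat it with `**`. A minimal usage sketch (hypothetical migration class, table, and columns):

```ruby
# Hypothetical migration; the table and columns are illustrative.
class CreateUsersSafely < ActiveRecord::Migration[7.1]
  def up
    # Options are forwarded to unsafe_create_table/create_table as keywords;
    # :force is still rejected by safe_create_table.
    safe_create_table :users, id: :bigserial do |t|
      t.text :email
      t.timestamps null: false
    end
  end
end
```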
@@ -16,28 +16,20 @@ module PgHaMigrations::SafeStatements
     when nil
       raise ArgumentError, "safe_create_enum_type expects a set of values; if you want an enum with no values please pass an empty array"
     when []
-      unsafe_execute("CREATE TYPE #{PG::Connection.quote_ident(name.to_s)} AS ENUM ()")
+      raw_execute("CREATE TYPE #{PG::Connection.quote_ident(name.to_s)} AS ENUM ()")
     else
       escaped_values = values.map do |value|
         "'#{PG::Connection.escape_string(value.to_s)}'"
       end
-      unsafe_execute("CREATE TYPE #{PG::Connection.quote_ident(name.to_s)} AS ENUM (#{escaped_values.join(',')})")
+      raw_execute("CREATE TYPE #{PG::Connection.quote_ident(name.to_s)} AS ENUM (#{escaped_values.join(',')})")
     end
   end
 
   def safe_add_enum_value(name, value)
-    unsafe_execute("ALTER TYPE #{PG::Connection.quote_ident(name.to_s)} ADD VALUE '#{PG::Connection.escape_string(value)}'")
+    raw_execute("ALTER TYPE #{PG::Connection.quote_ident(name.to_s)} ADD VALUE '#{PG::Connection.escape_string(value)}'")
   end
 
-  def unsafe_rename_enum_value(name, old_value, new_value)
-    if ActiveRecord::Base.connection.postgresql_version < 10_00_00
-      raise PgHaMigrations::InvalidMigrationError, "Renaming an enum value is not supported on Postgres databases before version 10"
-    end
-
-    unsafe_execute("ALTER TYPE #{PG::Connection.quote_ident(name.to_s)} RENAME VALUE '#{PG::Connection.escape_string(old_value)}' TO '#{PG::Connection.escape_string(new_value)}'")
-  end
-
-  def safe_add_column(table, column, type, options = {})
+  def safe_add_column(table, column, type, **options)
     # Note: we don't believe we need to consider the odd case where
     # `:default => nil` or `:default => -> { null }` (or similar) is
     # passed because:
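The enum helpers now route their DDL through `raw_execute`, and the enum-value rename helper is no longer defined in this module; the call signatures of the safe helpers are unchanged. A usage sketch (hypothetical type name and values):

```ruby
# Hypothetical migration; the enum type and values are illustrative.
class AddOrderStatusEnum < ActiveRecord::Migration[7.1]
  def up
    # Create the type with an explicit value list (an empty array is also allowed).
    safe_create_enum_type :order_status, ["pending", "shipped", "cancelled"]

    # Append a value to the existing enum type.
    safe_add_enum_value :order_status, "refunded"
  end
end
```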
@@ -53,20 +45,14 @@ module PgHaMigrations::SafeStatements
         raise PgHaMigrations::UnsafeMigrationError.new(":default is not safe if the default value is volatile. Use safe_change_column_default afterwards then backfill the data to prevent locking the table")
       end
     elsif options[:null] == false
-      raise PgHaMigrations::UnsafeMigrationError.new(":null => false is NOT SAFE if the table has data! If you
+      raise PgHaMigrations::UnsafeMigrationError.new(":null => false is NOT SAFE if the table has data! If you want to do this, use safe_make_column_not_nullable")
     end
 
     unless options.has_key?(:default)
       self.safe_added_columns_without_default_value << [table.to_s, column.to_s]
     end
 
-    unsafe_add_column(table, column, type, options)
-  end
-
-  def unsafe_add_column(table, column, type, options = {})
-    safely_acquire_lock_for_table(table) do
-      super(table, column, type, **options)
-    end
+    unsafe_add_column(table, column, type, **options)
   end
 
   def safe_change_column_default(table_name, column_name, default_value)
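`safe_add_column` continues to reject volatile `:default` values and `:null => false`; the error messages point to the multi-step pattern of adding the column first and handling the default and backfill separately. A sketch of that pattern (hypothetical table and column; the backfill itself is application-specific and omitted):

```ruby
# Hypothetical migration; the table and column are illustrative.
class AddScoreToUsers < ActiveRecord::Migration[7.1]
  def up
    # Adding the column with no default and no NOT NULL constraint avoids
    # rewriting or scanning the table while holding a strong lock.
    safe_add_column :users, :score, :integer

    # Set the default separately; existing rows are backfilled outside of DDL.
    safe_change_column_default :users, :score, 0
  end
end
```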
@@ -133,22 +119,110 @@ module PgHaMigrations::SafeStatements
   end
 
   def safe_make_column_nullable(table, column)
+    quoted_table_name = connection.quote_table_name(table)
+    quoted_column_name = connection.quote_column_name(column)
+
     safely_acquire_lock_for_table(table) do
-      unsafe_execute "ALTER TABLE #{table} ALTER COLUMN #{column} DROP NOT NULL"
+      raw_execute "ALTER TABLE #{quoted_table_name} ALTER COLUMN #{quoted_column_name} DROP NOT NULL"
     end
   end
 
-  def unsafe_make_column_not_nullable(table, column)
+  # Postgres 12+ can use a valid CHECK constraint to validate that no values of a column are null, avoiding
+  # a full table scan while holding an exclusive lock on the table when altering a column to NOT NULL
+  #
+  # Source:
+  # https://dba.stackexchange.com/questions/267947/how-can-i-set-a-column-to-not-null-without-locking-the-table-during-a-table-scan/268128#268128
+  # (https://archive.is/X55up)
+  def safe_make_column_not_nullable(table, column)
+    if ActiveRecord::Base.connection.postgresql_version < 12_00_00
+      raise PgHaMigrations::InvalidMigrationError, "Cannot safely make a column non-nullable before Postgres 12"
+    end
+
+    validated_table = PgHaMigrations::Table.from_table_name(table)
+    tmp_constraint_name = "tmp_not_null_constraint_#{OpenSSL::Digest::SHA256.hexdigest(column.to_s).first(7)}"
+
+    if validated_table.check_constraints.any? { |c| c.name == tmp_constraint_name }
+      raise PgHaMigrations::InvalidMigrationError, "A constraint #{tmp_constraint_name.inspect} already exists. " \
+        "This implies that a previous invocation of this method failed and left behind a temporary constraint. " \
+        "Please drop the constraint before attempting to run this method again."
+    end
+
+    safe_add_unvalidated_check_constraint(table, "#{connection.quote_column_name(column)} IS NOT NULL", name: tmp_constraint_name)
+    safe_validate_check_constraint(table, name: tmp_constraint_name)
+
+    # "Ordinarily this is checked during the ALTER TABLE by scanning the entire table; however, if a
+    # valid CHECK constraint is found which proves no NULL can exist, then the table scan is
+    # skipped."
+    # See: https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-DESC-SET-DROP-NOT-NULL
+    unsafe_make_column_not_nullable(table, column)
+    unsafe_remove_constraint(table, name: tmp_constraint_name)
+  end
+
+  # This method is a variant of `safe_make_column_not_nullable` that is expected to always be fast;
+  # i.e., it will not perform a full table scan to check for null values.
+  def safe_make_column_not_nullable_from_check_constraint(table, column, constraint_name:, drop_constraint: true)
+    unless ActiveRecord::Base.connection.postgresql_version >= 12_00_00
+      raise PgHaMigrations::InvalidMigrationError, "Cannot safely make a column non-nullable before Postgres 12"
+    end
+
+    unless constraint_name
+      raise ArgumentError, "Expected <constraint_name> to be present"
+    end
+    constraint_name = constraint_name.to_s
+
+    quoted_table_name = connection.quote_table_name(table)
+    quoted_column_name = connection.quote_column_name(column)
+
+    validated_table = PgHaMigrations::Table.from_table_name(table)
+    constraint = validated_table.check_constraints.find do |c|
+      c.name == constraint_name
+    end
+
+    unless constraint
+      raise PgHaMigrations::InvalidMigrationError, "The provided constraint does not exist"
+    end
+
+    unless constraint.validated
+      raise PgHaMigrations::InvalidMigrationError, "The provided constraint is not validated"
+    end
+
+    # The constraint has to actually prove that no null values exist, so the
+    # constraint condition can't simply include the `IS NOT NULL` check. We
+    # don't try to handle all possible cases here. For example,
+    # `a IS NOT NULL AND b IS NOT NULL` would prove what we need, but it would
+    # be complicated to check. We must ensure, however, that we're not too
+    # loose. For example, `a IS NOT NULL OR b IS NOT NULL` would not prove that
+    # `a IS NOT NULL`.
+    unless constraint.definition =~ /\ACHECK \(*(#{Regexp.escape(column.to_s)}|#{Regexp.escape(quoted_column_name)}) IS NOT NULL\)*\Z/i
+      raise PgHaMigrations::InvalidMigrationError, "The provided constraint does not enforce non-null values for the column"
+    end
+
+    # We don't want to acquire an exclusive lock on the table twice, and we also don't want it to be
+    # posssible to have the NOT NULL constraint addition succeed while the constraint removal fails,
+    # so we acquire the lock once and do both operations in the same block.
     safely_acquire_lock_for_table(table) do
-      unsafe_execute "ALTER TABLE #{table} ALTER COLUMN #{column} SET NOT NULL"
+      # "Ordinarily this is checked during the ALTER TABLE by scanning the entire table; however, if a
+      # valid CHECK constraint is found which proves no NULL can exist, then the table scan is
+      # skipped."
+      # See: https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-DESC-SET-DROP-NOT-NULL
+      unsafe_make_column_not_nullable(table, column)
+
+      if drop_constraint
+        unsafe_remove_constraint(table, name: constraint_name)
+      end
     end
   end
 
-  def safe_add_index_on_empty_table(table, columns, options={})
+  def safe_add_index_on_empty_table(table, columns, **options)
     if options[:algorithm] == :concurrently
       raise ArgumentError, "Cannot call safe_add_index_on_empty_table with :algorithm => :concurrently"
     end
 
+    # Check if nulls_not_distinct was provided but PostgreSQL version doesn't support it
+    if options[:nulls_not_distinct] && ActiveRecord::Base.connection.postgresql_version < 15_00_00
+      raise PgHaMigrations::InvalidMigrationError, "nulls_not_distinct option requires PostgreSQL 15 or higher"
+    end
+
     # Avoids taking out an unnecessary SHARE lock if the table does have data
     ensure_small_table!(table, empty: true)
 
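Two new helpers handle NOT NULL. `safe_make_column_not_nullable` adds a temporary unvalidated CHECK constraint, validates it without blocking writes, flips the column to NOT NULL (Postgres 12+ skips the table scan because the valid constraint proves no NULLs exist), and drops the temporary constraint. `safe_make_column_not_nullable_from_check_constraint` reuses a validated constraint you already have. A usage sketch (hypothetical table, column, and constraint names):

```ruby
# Hypothetical migrations; table, column, and constraint names are illustrative.
class MakeEmailRequired < ActiveRecord::Migration[7.1]
  def up
    # Adds, validates, and then drops a tmp_not_null_constraint_* CHECK constraint
    # so the ALTER TABLE ... SET NOT NULL does not scan the table.
    safe_make_column_not_nullable :users, :email
  end
end

class MakeEmailRequiredFromExistingConstraint < ActiveRecord::Migration[7.1]
  def up
    # The named constraint must already be validated and must check exactly
    # "email IS NOT NULL"; pass drop_constraint: false to keep it afterwards.
    safe_make_column_not_nullable_from_check_constraint :users, :email,
      constraint_name: "users_email_not_null"
  end
end
```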
@@ -160,11 +234,16 @@ module PgHaMigrations::SafeStatements
     end
   end
 
-  def safe_add_concurrent_index(table, columns, options={})
+  def safe_add_concurrent_index(table, columns, **options)
+    # Check if nulls_not_distinct was provided but PostgreSQL version doesn't support it
+    if options[:nulls_not_distinct] && ActiveRecord::Base.connection.postgresql_version < 15_00_00
+      raise PgHaMigrations::InvalidMigrationError, "nulls_not_distinct option requires PostgreSQL 15 or higher"
+    end
+
     unsafe_add_index(table, columns, **options.merge(:algorithm => :concurrently))
   end
 
-  def safe_remove_concurrent_index(table, options={})
+  def safe_remove_concurrent_index(table, **options)
     unless options.is_a?(Hash) && options.key?(:name)
       raise ArgumentError, "Expected safe_remove_concurrent_index to be called with arguments (table_name, :name => ...)"
     end
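`safe_add_concurrent_index` (like `safe_add_index_on_empty_table` above) now forwards a `nulls_not_distinct` option and rejects it on PostgreSQL versions below 15. A usage sketch (hypothetical table and column):

```ruby
# Hypothetical migration; requires PostgreSQL 15+ because of nulls_not_distinct.
class IndexUsersOnEmail < ActiveRecord::Migration[7.1]
  # CREATE INDEX CONCURRENTLY cannot run inside a transaction block.
  disable_ddl_transaction!

  def up
    safe_add_concurrent_index :users, :email, unique: true, nulls_not_distinct: true
  end
end
```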
@@ -184,8 +263,13 @@ module PgHaMigrations::SafeStatements
     using: nil,
     unique: nil,
     where: nil,
-    comment: nil
+    comment: nil,
+    nulls_not_distinct: nil
   )
+    # Check if nulls_not_distinct was provided but PostgreSQL version doesn't support it
+    if !nulls_not_distinct.nil? && ActiveRecord::Base.connection.postgresql_version < 15_00_00
+      raise PgHaMigrations::InvalidMigrationError, "nulls_not_distinct option requires PostgreSQL 15 or higher"
+    end
 
     if ActiveRecord::Base.connection.postgresql_version < 11_00_00
       raise PgHaMigrations::InvalidMigrationError, "Concurrent partitioned index creation not supported on Postgres databases before version 11"
@@ -208,21 +292,19 @@ module PgHaMigrations::SafeStatements
       PgHaMigrations::Index.from_table_and_columns(child_table, columns)
     end
 
-    #
-
-
-
-
-
-
-
-
-
-
-
-
-    )
-    end
+    # CREATE INDEX ON ONLY parent_table
+    unsafe_add_index(
+      parent_table.fully_qualified_name,
+      columns,
+      name: parent_index.name,
+      if_not_exists: if_not_exists,
+      using: using,
+      unique: unique,
+      nulls_not_distinct: nulls_not_distinct,
+      where: where,
+      comment: comment,
+      algorithm: :only, # see lib/pg_ha_migrations/hacks/add_index_on_only.rb
+    )
 
     child_indexes.each do |child_index|
       add_index_method = if child_index.table.natively_partitioned?
@@ -239,6 +321,7 @@ module PgHaMigrations::SafeStatements
         if_not_exists: if_not_exists,
         using: using,
         unique: unique,
+        nulls_not_distinct: nulls_not_distinct,
         where: where,
       )
     end
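The same option flows through the partitioned-index path, which creates the parent index with `CREATE INDEX ... ON ONLY` and then builds each child index. Assuming this hunk belongs to the gem's `safe_add_concurrent_partitioned_index` (the method name itself is not visible in the hunks above), a usage sketch with hypothetical table and columns:

```ruby
# Hypothetical migration; requires PostgreSQL 11+ (and 15+ for nulls_not_distinct).
class IndexEventsOnTenantAndRef < ActiveRecord::Migration[7.1]
  disable_ddl_transaction!

  def up
    safe_add_concurrent_partitioned_index :events, [:tenant_id, :external_ref],
      unique: true,
      nulls_not_distinct: true
  end
end
```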
@@ -261,7 +344,7 @@ module PgHaMigrations::SafeStatements
   end
 
   def safe_set_maintenance_work_mem_gb(gigabytes)
-    unsafe_execute("SET maintenance_work_mem = '#{PG::Connection.escape_string(gigabytes.to_s)} GB'")
+    raw_execute("SET maintenance_work_mem = '#{PG::Connection.escape_string(gigabytes.to_s)} GB'")
   end
 
   def safe_add_unvalidated_check_constraint(table, expression, name:)
@@ -296,20 +379,6 @@ module PgHaMigrations::SafeStatements
     end
   end
 
-  def unsafe_remove_constraint(table, name:)
-    raise ArgumentError, "Expected <name> to be present" unless name.present?
-
-    quoted_table_name = connection.quote_table_name(table)
-    quoted_constraint_name = connection.quote_table_name(name)
-    sql = "ALTER TABLE #{quoted_table_name} DROP CONSTRAINT #{quoted_constraint_name}"
-
-    safely_acquire_lock_for_table(table) do
-      say_with_time "remove_constraint(#{table.inspect}, name: #{name.inspect})" do
-        connection.execute(sql)
-      end
-    end
-  end
-
   def safe_create_partitioned_table(table, partition_key:, type:, infer_primary_key: nil, **options, &block)
     raise ArgumentError, "Expected <partition_key> to be present" unless partition_key.present?
 
@@ -351,7 +420,7 @@ module PgHaMigrations::SafeStatements
 
     options[:options] = "PARTITION BY #{type.upcase} (#{quoted_partition_key})"
 
-    safe_create_table(table, options) do |td|
+    safe_create_table(table, **options) do |td|
       block.call(td) if block
 
       next unless options[:id]
@@ -370,15 +439,7 @@ module PgHaMigrations::SafeStatements
     end
   end
 
-  def safe_partman_create_parent(table, **options)
-    if options[:retention].present? || options[:retention_keep_table] == false
-      raise PgHaMigrations::UnsafeMigrationError.new(":retention and/or :retention_keep_table => false can potentially result in data loss if misconfigured. Please use unsafe_partman_create_parent if you want to set these options")
-    end
-
-    unsafe_partman_create_parent(table, **options)
-  end
-
-  def unsafe_partman_create_parent(
+  def safe_partman_create_parent(
     table,
     partition_key:,
     interval:,
@@ -452,26 +513,6 @@ module PgHaMigrations::SafeStatements
     unsafe_partman_update_config(table, **options)
   end
 
-  def unsafe_partman_update_config(table, **options)
-    invalid_options = options.keys - PgHaMigrations::PARTMAN_UPDATE_CONFIG_OPTIONS
-
-    raise ArgumentError, "Unrecognized argument(s): #{invalid_options}" unless invalid_options.empty?
-
-    PgHaMigrations::PartmanConfig.schema = _quoted_partman_schema
-
-    config = PgHaMigrations::PartmanConfig.find(_fully_qualified_table_name_for_partman(table))
-
-    config.assign_attributes(**options)
-
-    inherit_privileges_changed = config.inherit_privileges_changed?
-
-    say_with_time "partman_update_config(#{table.inspect}, #{options.map { |k,v| "#{k}: #{v.inspect}" }.join(", ")})" do
-      config.save!
-    end
-
-    safe_partman_reapply_privileges(table) if inherit_privileges_changed
-  end
-
   def safe_partman_reapply_privileges(table)
     say_with_time "partman_reapply_privileges(#{table.inspect})" do
       connection.execute("SELECT #{_quoted_partman_schema}.reapply_privileges('#{_fully_qualified_table_name_for_partman(table)}')")
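`safe_partman_create_parent` now accepts the pg_partman settings as explicit keywords (previously it forwarded `**options` to `unsafe_partman_create_parent`), and `unsafe_partman_update_config` is no longer defined in this module. A usage sketch (hypothetical table; the exact interval format depends on your pg_partman version):

```ruby
# Hypothetical migration; the table, partition key, and interval are illustrative.
class PartitionEventsByMonth < ActiveRecord::Migration[7.1]
  def up
    safe_partman_create_parent :events,
      partition_key: :created_at,
      interval: "monthly"
  end
end
```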
@@ -528,40 +569,50 @@ module PgHaMigrations::SafeStatements
     super(conn, direction)
   end
 
-  def safely_acquire_lock_for_table(table, &block)
-    nested_target_table = Thread.current[__method__]
-
+  def safely_acquire_lock_for_table(*tables, mode: :access_exclusive, &block)
     _check_postgres_adapter!
 
-    target_table = PgHaMigrations::Table.from_table_name(table)
+    target_tables = PgHaMigrations::TableCollection.from_table_names(tables, mode)
 
-    if nested_target_table
-      if nested_target_table != target_table
-        raise PgHaMigrations::InvalidMigrationError,
-          "Nested lock detected! Cannot acquire lock on #{target_table.fully_qualified_name} " \
-          "while #{nested_target_table.fully_qualified_name} is locked."
+    if @parent_lock_tables
+      if !target_tables.subset?(@parent_lock_tables)
+        raise PgHaMigrations::InvalidMigrationError,
+          "Nested lock detected! Cannot acquire lock on #{target_tables.to_sql} " \
+          "while #{@parent_lock_tables.to_sql} is locked."
       end
-    else
-      Thread.current[__method__] = target_table
-    end
 
-
-
-
+      if @parent_lock_tables.mode < target_tables.mode
+        raise PgHaMigrations::InvalidMigrationError,
+          "Lock escalation detected! Cannot change lock level from :#{@parent_lock_tables.mode} " \
+          "to :#{target_tables.mode} for #{target_tables.to_sql}."
+      end
+
+      # If in a nested context and all of the above checks have passed,
+      # we have already acquired the lock (or a lock at a higher level),
+      # and can simply execute the block and short-circuit.
+      block.call
+
+      return
+    end
 
     successfully_acquired_lock = false
 
     until successfully_acquired_lock
-
+      loop do
        blocking_transactions = PgHaMigrations::BlockingDatabaseTransactions.find_blocking_transactions("#{PgHaMigrations::LOCK_TIMEOUT_SECONDS} seconds")
-
+
+        # Locking a partitioned table will also lock child tables (including sub-partitions),
+        # so we need to check for blocking queries on those tables as well
+        target_tables_with_partitions = target_tables.with_partitions
+
+        break unless blocking_transactions.any? do |query|
          query.tables_with_locks.any? do |locked_table|
-
+            target_tables_with_partitions.any? do |target_table|
              target_table.conflicts_with?(locked_table)
            end
          end
        end
-
+
       say "Waiting on blocking transactions:"
       blocking_transactions.each do |blocking_transaction|
         say blocking_transaction.description
@@ -570,16 +621,21 @@ module PgHaMigrations::SafeStatements
       end
 
       connection.transaction do
-        adjust_timeout_method = connection.postgresql_version >= 9_03_00 ? :adjust_lock_timeout : :adjust_statement_timeout
         begin
-
-
+          # A lock timeout would apply to each individual table in the query,
+          # so we made a conscious decision to use a statement timeout here
+          # to keep behavior consistent in a multi-table lock scenario.
+          adjust_statement_timeout(PgHaMigrations::LOCK_TIMEOUT_SECONDS) do
+            connection.execute("LOCK #{target_tables.to_sql} IN #{target_tables.mode.to_sql} MODE;")
          end
          successfully_acquired_lock = true
        rescue ActiveRecord::StatementInvalid => e
+          # It is still possible to hit a lock timeout if the session has
+          # that value set to something less than LOCK_TIMEOUT_SECONDS.
+          # We should retry when either of these exceptions are raised.
          if e.message =~ /PG::LockNotAvailable.+ lock timeout/ || e.message =~ /PG::QueryCanceled.+ statement timeout/
            sleep_seconds = PgHaMigrations::LOCK_FAILURE_RETRY_DELAY_MULTLIPLIER * PgHaMigrations::LOCK_TIMEOUT_SECONDS
-            say "Timed out trying to acquire #{
+            say "Timed out trying to acquire #{target_tables.mode.to_sql} lock on #{target_tables.to_sql}."
            say "Sleeping for #{sleep_seconds}s to allow potentially queued up queries to finish before continuing."
            sleep(sleep_seconds)
 
@@ -590,12 +646,16 @@ module PgHaMigrations::SafeStatements
         end
 
         if successfully_acquired_lock
-          block.call
+          @parent_lock_tables = target_tables
+
+          begin
+            block.call
+          ensure
+            @parent_lock_tables = nil
+          end
         end
       end
     end
-  ensure
-    Thread.current[__method__] = nil unless nested_target_table
   end
 
   def adjust_lock_timeout(timeout_seconds = PgHaMigrations::LOCK_TIMEOUT_SECONDS, &block)