pg_ha_migrations 1.8.0 → 2.1.0

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
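
For orientation, here is a minimal migration sketch exercising helpers that this diff touches: the 2.x keyword-argument signatures, the new safe_make_column_not_nullable, and the nulls_not_distinct index option. The class, table, and column names (and the Rails migration version) are invented for illustration and are not part of this diff; see the gem's README for authoritative usage.

# Illustrative sketch only; names are hypothetical and not taken from this diff.
class ExampleUsersChange < ActiveRecord::Migration[7.1]
  def up
    safe_create_table :example_users do |t|  # options are keyword args (**options) as of 2.x
      t.text :email
    end

    safe_add_column :example_users, :nickname, :text

    # New in 2.x: relies on a validated CHECK constraint (Postgres 12+) so the
    # ALTER TABLE ... SET NOT NULL can skip the full table scan.
    safe_make_column_not_nullable :example_users, :email

    # nulls_not_distinct is now threaded through the index helpers (Postgres 15+).
    safe_add_concurrent_index :example_users, [:email], unique: true, nulls_not_distinct: true
  end
end
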
@@ -3,12 +3,16 @@ module PgHaMigrations::SafeStatements
  @safe_added_columns_without_default_value ||= []
  end
 
- def safe_create_table(table, options={}, &block)
+ def partman_extension
+ @partman_extension ||= PgHaMigrations::Extension.new("pg_partman")
+ end
+
+ def safe_create_table(table, **options, &block)
  if options[:force]
  raise PgHaMigrations::UnsafeMigrationError.new(":force is NOT SAFE! Explicitly call unsafe_drop_table first if you want to recreate an existing table")
  end
 
- unsafe_create_table(table, options, &block)
+ unsafe_create_table(table, **options, &block)
  end
 
  def safe_create_enum_type(name, values=nil)
@@ -16,28 +20,20 @@ module PgHaMigrations::SafeStatements
  when nil
  raise ArgumentError, "safe_create_enum_type expects a set of values; if you want an enum with no values please pass an empty array"
  when []
- unsafe_execute("CREATE TYPE #{PG::Connection.quote_ident(name.to_s)} AS ENUM ()")
+ raw_execute("CREATE TYPE #{PG::Connection.quote_ident(name.to_s)} AS ENUM ()")
  else
  escaped_values = values.map do |value|
  "'#{PG::Connection.escape_string(value.to_s)}'"
  end
- unsafe_execute("CREATE TYPE #{PG::Connection.quote_ident(name.to_s)} AS ENUM (#{escaped_values.join(',')})")
+ raw_execute("CREATE TYPE #{PG::Connection.quote_ident(name.to_s)} AS ENUM (#{escaped_values.join(',')})")
  end
  end
 
  def safe_add_enum_value(name, value)
- unsafe_execute("ALTER TYPE #{PG::Connection.quote_ident(name.to_s)} ADD VALUE '#{PG::Connection.escape_string(value)}'")
- end
-
- def unsafe_rename_enum_value(name, old_value, new_value)
- if ActiveRecord::Base.connection.postgresql_version < 10_00_00
- raise PgHaMigrations::InvalidMigrationError, "Renaming an enum value is not supported on Postgres databases before version 10"
- end
-
- unsafe_execute("ALTER TYPE #{PG::Connection.quote_ident(name.to_s)} RENAME VALUE '#{PG::Connection.escape_string(old_value)}' TO '#{PG::Connection.escape_string(new_value)}'")
+ raw_execute("ALTER TYPE #{PG::Connection.quote_ident(name.to_s)} ADD VALUE '#{PG::Connection.escape_string(value)}'")
  end
 
- def safe_add_column(table, column, type, options = {})
+ def safe_add_column(table, column, type, **options)
  # Note: we don't believe we need to consider the odd case where
  # `:default => nil` or `:default => -> { null }` (or similar) is
  # passed because:
@@ -53,20 +49,14 @@ module PgHaMigrations::SafeStatements
  raise PgHaMigrations::UnsafeMigrationError.new(":default is not safe if the default value is volatile. Use safe_change_column_default afterwards then backfill the data to prevent locking the table")
  end
  elsif options[:null] == false
- raise PgHaMigrations::UnsafeMigrationError.new(":null => false is NOT SAFE if the table has data! If you _really_ want to do this, use unsafe_make_column_not_nullable")
+ raise PgHaMigrations::UnsafeMigrationError.new(":null => false is NOT SAFE if the table has data! If you want to do this, use safe_make_column_not_nullable")
  end
 
  unless options.has_key?(:default)
  self.safe_added_columns_without_default_value << [table.to_s, column.to_s]
  end
 
- unsafe_add_column(table, column, type, options)
- end
-
- def unsafe_add_column(table, column, type, options = {})
- safely_acquire_lock_for_table(table) do
- super(table, column, type, **options)
- end
+ unsafe_add_column(table, column, type, **options)
  end
 
  def safe_change_column_default(table_name, column_name, default_value)
@@ -133,22 +123,126 @@ module PgHaMigrations::SafeStatements
  end
 
  def safe_make_column_nullable(table, column)
+ quoted_table_name = connection.quote_table_name(table)
+ quoted_column_name = connection.quote_column_name(column)
+
  safely_acquire_lock_for_table(table) do
- unsafe_execute "ALTER TABLE #{table} ALTER COLUMN #{column} DROP NOT NULL"
+ raw_execute "ALTER TABLE #{quoted_table_name} ALTER COLUMN #{quoted_column_name} DROP NOT NULL"
+ end
+ end
+
+ # Postgres 12+ can use a valid CHECK constraint to validate that no values of a column are null, avoiding
+ # a full table scan while holding an exclusive lock on the table when altering a column to NOT NULL
+ #
+ # Source:
+ # https://dba.stackexchange.com/questions/267947/how-can-i-set-a-column-to-not-null-without-locking-the-table-during-a-table-scan/268128#268128
+ # (https://archive.is/X55up)
+ def safe_make_column_not_nullable(table, column)
+ if ActiveRecord::Base.connection.postgresql_version < 12_00_00
+ raise PgHaMigrations::InvalidMigrationError, "Cannot safely make a column non-nullable before Postgres 12"
+ end
+
+ validated_table = PgHaMigrations::Table.from_table_name(table)
+ quoted_column_name = connection.quote_column_name(column)
+ column_str = column.to_s
+
+ # First, look for existing constraints that match the IS NOT NULL pattern for this column
+ existing_constraint = validated_table.check_constraints.select do |c|
+ c.definition =~ /\ACHECK \(*(#{Regexp.escape(column_str)}|#{Regexp.escape(quoted_column_name)}) IS NOT NULL\)*\Z/i
+ end.first
+
+ constraint_name = nil
+ if existing_constraint
+ if existing_constraint.validated
+ say "Found existing validated constraint #{existing_constraint.inspect} for column #{column_str}, using it directly"
+ else
+ say "Found existing unvalidated constraint #{existing_constraint.inspect} for column #{column_str}, validating it first"
+ safe_validate_check_constraint(table, name: existing_constraint.name)
+ end
+ constraint_name = existing_constraint.name
+ else
+ # Create a temporary constraint if no matching constraints exist
+ constraint_name = "tmp_not_null_constraint_#{OpenSSL::Digest::SHA256.hexdigest(column.to_s).first(7)}"
+
+ safe_add_unvalidated_check_constraint(table, "#{quoted_column_name} IS NOT NULL", name: constraint_name)
+ safe_validate_check_constraint(table, name: constraint_name)
  end
+
+ # "Ordinarily this is checked during the ALTER TABLE by scanning the entire table; however, if a
+ # valid CHECK constraint is found which proves no NULL can exist, then the table scan is
+ # skipped."
+ # See: https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-DESC-SET-DROP-NOT-NULL
+ unsafe_make_column_not_nullable(table, column)
+
+ # Always drop the constraint at the end, whether it was existing or temporary
+ unsafe_remove_constraint(table, name: constraint_name)
  end
 
- def unsafe_make_column_not_nullable(table, column, options={}) # options arg is only present for backwards compatiblity
+ # This method is a variant of `safe_make_column_not_nullable` that is expected to always be fast;
+ # i.e., it will not perform a full table scan to check for null values.
+ def safe_make_column_not_nullable_from_check_constraint(table, column, constraint_name:, drop_constraint: true)
+ unless ActiveRecord::Base.connection.postgresql_version >= 12_00_00
+ raise PgHaMigrations::InvalidMigrationError, "Cannot safely make a column non-nullable before Postgres 12"
+ end
+
+ unless constraint_name
+ raise ArgumentError, "Expected <constraint_name> to be present"
+ end
+ constraint_name = constraint_name.to_s
+
+ quoted_table_name = connection.quote_table_name(table)
+ quoted_column_name = connection.quote_column_name(column)
+
+ validated_table = PgHaMigrations::Table.from_table_name(table)
+ constraint = validated_table.check_constraints.find do |c|
+ c.name == constraint_name
+ end
+
+ unless constraint
+ raise PgHaMigrations::InvalidMigrationError, "The provided constraint does not exist"
+ end
+
+ unless constraint.validated
+ raise PgHaMigrations::InvalidMigrationError, "The provided constraint is not validated"
+ end
+
+ # The constraint has to actually prove that no null values exist, so the
+ # constraint condition can't simply include the `IS NOT NULL` check. We
+ # don't try to handle all possible cases here. For example,
+ # `a IS NOT NULL AND b IS NOT NULL` would prove what we need, but it would
+ # be complicated to check. We must ensure, however, that we're not too
+ # loose. For example, `a IS NOT NULL OR b IS NOT NULL` would not prove that
+ # `a IS NOT NULL`.
+ unless constraint.definition =~ /\ACHECK \(*(#{Regexp.escape(column.to_s)}|#{Regexp.escape(quoted_column_name)}) IS NOT NULL\)*\Z/i
+ raise PgHaMigrations::InvalidMigrationError, "The provided constraint does not enforce non-null values for the column"
+ end
+
+ # We don't want to acquire an exclusive lock on the table twice, and we also don't want it to be
+ # possible to have the NOT NULL constraint addition succeed while the constraint removal fails,
+ # so we acquire the lock once and do both operations in the same block.
  safely_acquire_lock_for_table(table) do
- unsafe_execute "ALTER TABLE #{table} ALTER COLUMN #{column} SET NOT NULL"
+ # "Ordinarily this is checked during the ALTER TABLE by scanning the entire table; however, if a
+ # valid CHECK constraint is found which proves no NULL can exist, then the table scan is
+ # skipped."
+ # See: https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-DESC-SET-DROP-NOT-NULL
+ unsafe_make_column_not_nullable(table, column)
+
+ if drop_constraint
+ unsafe_remove_constraint(table, name: constraint_name)
+ end
  end
  end
 
- def safe_add_index_on_empty_table(table, columns, options={})
+ def safe_add_index_on_empty_table(table, columns, **options)
  if options[:algorithm] == :concurrently
  raise ArgumentError, "Cannot call safe_add_index_on_empty_table with :algorithm => :concurrently"
  end
 
+ # Check if nulls_not_distinct was provided but PostgreSQL version doesn't support it
+ if options[:nulls_not_distinct] && ActiveRecord::Base.connection.postgresql_version < 15_00_00
+ raise PgHaMigrations::InvalidMigrationError, "nulls_not_distinct option requires PostgreSQL 15 or higher"
+ end
+
  # Avoids taking out an unnecessary SHARE lock if the table does have data
  ensure_small_table!(table, empty: true)
 
@@ -160,11 +254,16 @@ module PgHaMigrations::SafeStatements
  end
  end
 
- def safe_add_concurrent_index(table, columns, options={})
+ def safe_add_concurrent_index(table, columns, **options)
+ # Check if nulls_not_distinct was provided but PostgreSQL version doesn't support it
+ if options[:nulls_not_distinct] && ActiveRecord::Base.connection.postgresql_version < 15_00_00
+ raise PgHaMigrations::InvalidMigrationError, "nulls_not_distinct option requires PostgreSQL 15 or higher"
+ end
+
  unsafe_add_index(table, columns, **options.merge(:algorithm => :concurrently))
  end
 
- def safe_remove_concurrent_index(table, options={})
+ def safe_remove_concurrent_index(table, **options)
  unless options.is_a?(Hash) && options.key?(:name)
  raise ArgumentError, "Expected safe_remove_concurrent_index to be called with arguments (table_name, :name => ...)"
  end
@@ -184,8 +283,13 @@ module PgHaMigrations::SafeStatements
  using: nil,
  unique: nil,
  where: nil,
- comment: nil
+ comment: nil,
+ nulls_not_distinct: nil
  )
+ # Check if nulls_not_distinct was provided but PostgreSQL version doesn't support it
+ if !nulls_not_distinct.nil? && ActiveRecord::Base.connection.postgresql_version < 15_00_00
+ raise PgHaMigrations::InvalidMigrationError, "nulls_not_distinct option requires PostgreSQL 15 or higher"
+ end
 
  if ActiveRecord::Base.connection.postgresql_version < 11_00_00
  raise PgHaMigrations::InvalidMigrationError, "Concurrent partitioned index creation not supported on Postgres databases before version 11"
@@ -208,21 +312,19 @@ module PgHaMigrations::SafeStatements
  PgHaMigrations::Index.from_table_and_columns(child_table, columns)
  end
 
- # TODO: take out ShareLock after issue #39 is implemented
- safely_acquire_lock_for_table(parent_table.fully_qualified_name) do
- # CREATE INDEX ON ONLY parent_table
- unsafe_add_index(
- parent_table.fully_qualified_name,
- columns,
- name: parent_index.name,
- if_not_exists: if_not_exists,
- using: using,
- unique: unique,
- where: where,
- comment: comment,
- algorithm: :only, # see lib/pg_ha_migrations/hacks/add_index_on_only.rb
- )
- end
+ # CREATE INDEX ON ONLY parent_table
+ unsafe_add_index(
+ parent_table.fully_qualified_name,
+ columns,
+ name: parent_index.name,
+ if_not_exists: if_not_exists,
+ using: using,
+ unique: unique,
+ nulls_not_distinct: nulls_not_distinct,
+ where: where,
+ comment: comment,
+ algorithm: :only, # see lib/pg_ha_migrations/hacks/add_index_on_only.rb
+ )
 
  child_indexes.each do |child_index|
  add_index_method = if child_index.table.natively_partitioned?
@@ -239,6 +341,7 @@ module PgHaMigrations::SafeStatements
  if_not_exists: if_not_exists,
  using: using,
  unique: unique,
+ nulls_not_distinct: nulls_not_distinct,
  where: where,
  )
  end
@@ -261,7 +364,7 @@ module PgHaMigrations::SafeStatements
  end
 
  def safe_set_maintenance_work_mem_gb(gigabytes)
- unsafe_execute("SET maintenance_work_mem = '#{PG::Connection.escape_string(gigabytes.to_s)} GB'")
+ raw_execute("SET maintenance_work_mem = '#{PG::Connection.escape_string(gigabytes.to_s)} GB'")
  end
 
  def safe_add_unvalidated_check_constraint(table, expression, name:)
@@ -296,20 +399,6 @@ module PgHaMigrations::SafeStatements
  end
  end
 
- def unsafe_remove_constraint(table, name:)
- raise ArgumentError, "Expected <name> to be present" unless name.present?
-
- quoted_table_name = connection.quote_table_name(table)
- quoted_constraint_name = connection.quote_table_name(name)
- sql = "ALTER TABLE #{quoted_table_name} DROP CONSTRAINT #{quoted_constraint_name}"
-
- safely_acquire_lock_for_table(table) do
- say_with_time "remove_constraint(#{table.inspect}, name: #{name.inspect})" do
- connection.execute(sql)
- end
- end
- end
-
  def safe_create_partitioned_table(table, partition_key:, type:, infer_primary_key: nil, **options, &block)
  raise ArgumentError, "Expected <partition_key> to be present" unless partition_key.present?
 
@@ -351,7 +440,7 @@ module PgHaMigrations::SafeStatements
 
  options[:options] = "PARTITION BY #{type.upcase} (#{quoted_partition_key})"
 
- safe_create_table(table, options) do |td|
+ safe_create_table(table, **options) do |td|
  block.call(td) if block
 
  next unless options[:id]
@@ -370,15 +459,7 @@ module PgHaMigrations::SafeStatements
  end
  end
 
- def safe_partman_create_parent(table, **options)
- if options[:retention].present? || options[:retention_keep_table] == false
- raise PgHaMigrations::UnsafeMigrationError.new(":retention and/or :retention_keep_table => false can potentially result in data loss if misconfigured. Please use unsafe_partman_create_parent if you want to set these options")
- end
-
- unsafe_partman_create_parent(table, **options)
- end
-
- def unsafe_partman_create_parent(
+ def safe_partman_create_parent(
  table,
  partition_key:,
  interval:,
@@ -397,6 +478,18 @@ module PgHaMigrations::SafeStatements
  raise PgHaMigrations::InvalidMigrationError, "Native partitioning with partman not supported on Postgres databases before version 11"
  end
 
+ raise PgHaMigrations::MissingExtensionError, "The pg_partman extension is not installed" unless partman_extension.installed?
+
+ if partman_extension.major_version >= 5 || PgHaMigrations.config.partman_5_compatibility_mode
+ if PgHaMigrations::PARTMAN_UNSUPPORTED_INTERVALS.include?(interval)
+ raise PgHaMigrations::InvalidMigrationError,
+ "Special partition interval values (#{interval}) are no longer supported. " \
+ "Please use a supported interval time value from core PostgreSQL " \
+ "#{(partman_extension.major_version < 5 ? "or turn partman 5 compatibility mode off " : "")}" \
+ "(https://www.postgresql.org/docs/current/datatype-datetime.html#DATATYPE-INTERVAL-INPUT)"
+ end
+ end
+
  formatted_start_partition = nil
 
  if start_partition.present?
@@ -411,16 +504,22 @@ module PgHaMigrations::SafeStatements
  end
  end
 
+ validated_table = PgHaMigrations::PartmanTable.from_table_name(table)
+ validated_template_table = template_table ? PgHaMigrations::PartmanTable.from_table_name(template_table) : nil
+
  create_parent_options = {
- parent_table: _fully_qualified_table_name_for_partman(table),
- template_table: template_table ? _fully_qualified_table_name_for_partman(template_table) : nil,
+ parent_table: validated_table.fully_qualified_name,
+ template_table: validated_template_table&.fully_qualified_name,
  control: partition_key,
- type: "native",
  interval: interval,
  premake: premake,
  start_partition: formatted_start_partition,
  }.compact
 
+ if partman_extension.major_version < 5
+ create_parent_options[:type] = "native"
+ end
+
  create_parent_sql = create_parent_options.map { |k, v| "p_#{k} := #{connection.quote(v)}" }.join(", ")
 
  log_message = "partman_create_parent(#{table.inspect}, " \
@@ -431,7 +530,7 @@ module PgHaMigrations::SafeStatements
  "template_table: #{template_table.inspect})"
 
  say_with_time(log_message) do
- connection.execute("SELECT #{_quoted_partman_schema}.create_parent(#{create_parent_sql})")
+ connection.execute("SELECT #{partman_extension.quoted_schema}.create_parent(#{create_parent_sql})")
  end
 
  update_config_options = {
@@ -442,6 +541,10 @@ module PgHaMigrations::SafeStatements
  }.compact
 
  unsafe_partman_update_config(table, **update_config_options)
+
+ if PgHaMigrations.config.partman_5_compatibility_mode && partman_extension.major_version < 5
+ unsafe_partman_standardize_partition_naming(table)
+ end
  end
 
  def safe_partman_update_config(table, **options)
@@ -452,55 +555,16 @@ module PgHaMigrations::SafeStatements
  unsafe_partman_update_config(table, **options)
  end
 
- def unsafe_partman_update_config(table, **options)
- invalid_options = options.keys - PgHaMigrations::PARTMAN_UPDATE_CONFIG_OPTIONS
-
- raise ArgumentError, "Unrecognized argument(s): #{invalid_options}" unless invalid_options.empty?
-
- PgHaMigrations::PartmanConfig.schema = _quoted_partman_schema
-
- config = PgHaMigrations::PartmanConfig.find(_fully_qualified_table_name_for_partman(table))
-
- config.assign_attributes(**options)
-
- inherit_privileges_changed = config.inherit_privileges_changed?
-
- say_with_time "partman_update_config(#{table.inspect}, #{options.map { |k,v| "#{k}: #{v.inspect}" }.join(", ")})" do
- config.save!
- end
+ def safe_partman_reapply_privileges(table)
+ raise PgHaMigrations::MissingExtensionError, "The pg_partman extension is not installed" unless partman_extension.installed?
 
- safe_partman_reapply_privileges(table) if inherit_privileges_changed
- end
+ validated_table = PgHaMigrations::PartmanTable.from_table_name(table)
 
- def safe_partman_reapply_privileges(table)
  say_with_time "partman_reapply_privileges(#{table.inspect})" do
- connection.execute("SELECT #{_quoted_partman_schema}.reapply_privileges('#{_fully_qualified_table_name_for_partman(table)}')")
+ connection.execute("SELECT #{partman_extension.quoted_schema}.reapply_privileges('#{validated_table.fully_qualified_name}')")
  end
  end
 
- def _quoted_partman_schema
- schema = connection.select_value(<<~SQL)
- SELECT nspname
- FROM pg_namespace JOIN pg_extension
- ON pg_namespace.oid = pg_extension.extnamespace
- WHERE pg_extension.extname = 'pg_partman'
- SQL
-
- raise PgHaMigrations::InvalidMigrationError, "The pg_partman extension is not installed" unless schema.present?
-
- connection.quote_schema_name(schema)
- end
-
- def _fully_qualified_table_name_for_partman(table)
- table = PgHaMigrations::Table.from_table_name(table)
-
- [table.schema, table.name].each do |identifier|
- if identifier.to_s !~ /^[a-z_][a-z_\d]*$/
- raise PgHaMigrations::InvalidMigrationError, "Partman requires schema / table names to be lowercase with underscores"
- end
- end.join(".")
- end
-
  def _per_migration_caller
  @_per_migration_caller ||= Kernel.caller
  end
@@ -528,40 +592,50 @@ module PgHaMigrations::SafeStatements
  super(conn, direction)
  end
 
- def safely_acquire_lock_for_table(table, mode: :access_exclusive, &block)
- nested_target_table = Thread.current[__method__]
-
+ def safely_acquire_lock_for_table(*tables, mode: :access_exclusive, &block)
  _check_postgres_adapter!
 
- target_table = PgHaMigrations::Table.from_table_name(table, mode)
+ target_tables = PgHaMigrations::TableCollection.from_table_names(tables, mode)
 
- if nested_target_table
- if nested_target_table != target_table
- raise PgHaMigrations::InvalidMigrationError, "Nested lock detected! Cannot acquire lock on #{target_table.fully_qualified_name} while #{nested_target_table.fully_qualified_name} is locked."
- elsif nested_target_table.mode < target_table.mode
- raise PgHaMigrations::InvalidMigrationError, "Lock escalation detected! Cannot change lock level from :#{nested_target_table.mode} to :#{target_table.mode} for #{target_table.fully_qualified_name}."
+ if @parent_lock_tables
+ if !target_tables.subset?(@parent_lock_tables)
+ raise PgHaMigrations::InvalidMigrationError,
+ "Nested lock detected! Cannot acquire lock on #{target_tables.to_sql} " \
+ "while #{@parent_lock_tables.to_sql} is locked."
+ end
+
+ if @parent_lock_tables.mode < target_tables.mode
+ raise PgHaMigrations::InvalidMigrationError,
+ "Lock escalation detected! Cannot change lock level from :#{@parent_lock_tables.mode} " \
+ "to :#{target_tables.mode} for #{target_tables.to_sql}."
  end
- else
- Thread.current[__method__] = target_table
- end
 
- # Locking a partitioned table will also lock child tables (including sub-partitions),
- # so we need to check for blocking queries on those tables as well
- target_tables = target_table.partitions(include_sub_partitions: true, include_self: true)
+ # If in a nested context and all of the above checks have passed,
+ # we have already acquired the lock (or a lock at a higher level),
+ # and can simply execute the block and short-circuit.
+ block.call
+
+ return
+ end
 
  successfully_acquired_lock = false
 
  until successfully_acquired_lock
- while (
+ loop do
  blocking_transactions = PgHaMigrations::BlockingDatabaseTransactions.find_blocking_transactions("#{PgHaMigrations::LOCK_TIMEOUT_SECONDS} seconds")
- blocking_transactions.any? do |query|
+
+ # Locking a partitioned table will also lock child tables (including sub-partitions),
+ # so we need to check for blocking queries on those tables as well
+ target_tables_with_partitions = target_tables.with_partitions
+
+ break unless blocking_transactions.any? do |query|
  query.tables_with_locks.any? do |locked_table|
- target_tables.any? do |target_table|
+ target_tables_with_partitions.any? do |target_table|
  target_table.conflicts_with?(locked_table)
  end
  end
  end
- )
+
  say "Waiting on blocking transactions:"
  blocking_transactions.each do |blocking_transaction|
  say blocking_transaction.description
@@ -570,16 +644,21 @@ module PgHaMigrations::SafeStatements
  end
 
  connection.transaction do
- adjust_timeout_method = connection.postgresql_version >= 9_03_00 ? :adjust_lock_timeout : :adjust_statement_timeout
  begin
- method(adjust_timeout_method).call(PgHaMigrations::LOCK_TIMEOUT_SECONDS) do
- connection.execute("LOCK #{target_table.fully_qualified_name} IN #{target_table.mode.to_sql} MODE;")
+ # A lock timeout would apply to each individual table in the query,
+ # so we made a conscious decision to use a statement timeout here
+ # to keep behavior consistent in a multi-table lock scenario.
+ adjust_statement_timeout(PgHaMigrations::LOCK_TIMEOUT_SECONDS) do
+ connection.execute("LOCK #{target_tables.to_sql} IN #{target_tables.mode.to_sql} MODE;")
  end
  successfully_acquired_lock = true
  rescue ActiveRecord::StatementInvalid => e
+ # It is still possible to hit a lock timeout if the session has
+ # that value set to something less than LOCK_TIMEOUT_SECONDS.
+ # We should retry when either of these exceptions are raised.
  if e.message =~ /PG::LockNotAvailable.+ lock timeout/ || e.message =~ /PG::QueryCanceled.+ statement timeout/
  sleep_seconds = PgHaMigrations::LOCK_FAILURE_RETRY_DELAY_MULTLIPLIER * PgHaMigrations::LOCK_TIMEOUT_SECONDS
- say "Timed out trying to acquire #{target_table.mode.to_sql} lock on the #{target_table.fully_qualified_name} table."
+ say "Timed out trying to acquire #{target_tables.mode.to_sql} lock on #{target_tables.to_sql}."
  say "Sleeping for #{sleep_seconds}s to allow potentially queued up queries to finish before continuing."
  sleep(sleep_seconds)
 
@@ -590,12 +669,16 @@ module PgHaMigrations::SafeStatements
  end
 
  if successfully_acquired_lock
- block.call
+ @parent_lock_tables = target_tables
+
+ begin
+ block.call
+ ensure
+ @parent_lock_tables = nil
+ end
  end
  end
  end
- ensure
- Thread.current[__method__] = nil unless nested_target_table
  end
 
  def adjust_lock_timeout(timeout_seconds = PgHaMigrations::LOCK_TIMEOUT_SECONDS, &block)