pg_ha_migrations 1.7.0 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,24 +1,14 @@
 module PgHaMigrations::SafeStatements
-  PARTITION_TYPES = %i[range list hash]
-
-  PARTMAN_UPDATE_CONFIG_OPTIONS = %i[
-    infinite_time_partitions
-    inherit_privileges
-    premake
-    retention
-    retention_keep_table
-  ]
-
   def safe_added_columns_without_default_value
     @safe_added_columns_without_default_value ||= []
   end

-  def safe_create_table(table, options={}, &block)
+  def safe_create_table(table, **options, &block)
     if options[:force]
       raise PgHaMigrations::UnsafeMigrationError.new(":force is NOT SAFE! Explicitly call unsafe_drop_table first if you want to recreate an existing table")
     end

-    unsafe_create_table(table, options, &block)
+    unsafe_create_table(table, **options, &block)
   end

   def safe_create_enum_type(name, values=nil)
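
safe_create_table now takes keyword arguments (**options) instead of an options hash, which matters for callers on Ruby 3 where implicit hash-to-keyword conversion no longer applies. A minimal migration using it might look like the following sketch (the class, table, and columns are illustrative, not taken from the gem):

    class CreateWidgets < ActiveRecord::Migration[7.0]
      def up
        safe_create_table :widgets, id: :bigserial do |t|
          t.text :name, null: false
          t.timestamps null: false
        end
      end
    end
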
@@ -26,28 +16,20 @@ module PgHaMigrations::SafeStatements
     when nil
       raise ArgumentError, "safe_create_enum_type expects a set of values; if you want an enum with no values please pass an empty array"
     when []
-      unsafe_execute("CREATE TYPE #{PG::Connection.quote_ident(name.to_s)} AS ENUM ()")
+      raw_execute("CREATE TYPE #{PG::Connection.quote_ident(name.to_s)} AS ENUM ()")
     else
       escaped_values = values.map do |value|
         "'#{PG::Connection.escape_string(value.to_s)}'"
       end
-      unsafe_execute("CREATE TYPE #{PG::Connection.quote_ident(name.to_s)} AS ENUM (#{escaped_values.join(',')})")
+      raw_execute("CREATE TYPE #{PG::Connection.quote_ident(name.to_s)} AS ENUM (#{escaped_values.join(',')})")
     end
   end

   def safe_add_enum_value(name, value)
-    unsafe_execute("ALTER TYPE #{PG::Connection.quote_ident(name.to_s)} ADD VALUE '#{PG::Connection.escape_string(value)}'")
+    raw_execute("ALTER TYPE #{PG::Connection.quote_ident(name.to_s)} ADD VALUE '#{PG::Connection.escape_string(value)}'")
   end

-  def unsafe_rename_enum_value(name, old_value, new_value)
-    if ActiveRecord::Base.connection.postgresql_version < 10_00_00
-      raise PgHaMigrations::InvalidMigrationError, "Renaming an enum value is not supported on Postgres databases before version 10"
-    end
-
-    unsafe_execute("ALTER TYPE #{PG::Connection.quote_ident(name.to_s)} RENAME VALUE '#{PG::Connection.escape_string(old_value)}' TO '#{PG::Connection.escape_string(new_value)}'")
-  end
-
-  def safe_add_column(table, column, type, options = {})
+  def safe_add_column(table, column, type, **options)
     # Note: we don't believe we need to consider the odd case where
     # `:default => nil` or `:default => -> { null }` (or similar) is
     # passed because:
@@ -63,20 +45,14 @@ module PgHaMigrations::SafeStatements
         raise PgHaMigrations::UnsafeMigrationError.new(":default is not safe if the default value is volatile. Use safe_change_column_default afterwards then backfill the data to prevent locking the table")
       end
     elsif options[:null] == false
-      raise PgHaMigrations::UnsafeMigrationError.new(":null => false is NOT SAFE if the table has data! If you _really_ want to do this, use unsafe_make_column_not_nullable")
+      raise PgHaMigrations::UnsafeMigrationError.new(":null => false is NOT SAFE if the table has data! If you want to do this, use safe_make_column_not_nullable")
     end

     unless options.has_key?(:default)
       self.safe_added_columns_without_default_value << [table.to_s, column.to_s]
     end

-    unsafe_add_column(table, column, type, options)
-  end
-
-  def unsafe_add_column(table, column, type, options = {})
-    safely_acquire_lock_for_table(table) do
-      super(table, column, type, **options)
-    end
+    unsafe_add_column(table, column, type, **options)
   end

   def safe_change_column_default(table_name, column_name, default_value)
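
The updated message steers callers toward the new safe_make_column_not_nullable rather than the removed unsafe variant. The recommended sequence for introducing a required column is otherwise unchanged; roughly (hypothetical table and column, batched backfill code omitted):

    safe_add_column :users, :status, :text
    safe_change_column_default :users, :status, "active"
    # backfill existing rows in batches outside the migration, then:
    safe_make_column_not_nullable :users, :status
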
@@ -143,22 +119,131 @@ module PgHaMigrations::SafeStatements
   end

   def safe_make_column_nullable(table, column)
+    quoted_table_name = connection.quote_table_name(table)
+    quoted_column_name = connection.quote_column_name(column)
+
     safely_acquire_lock_for_table(table) do
-      unsafe_execute "ALTER TABLE #{table} ALTER COLUMN #{column} DROP NOT NULL"
+      raw_execute "ALTER TABLE #{quoted_table_name} ALTER COLUMN #{quoted_column_name} DROP NOT NULL"
     end
   end

-  def unsafe_make_column_not_nullable(table, column, options={}) # options arg is only present for backwards compatiblity
+  # Postgres 12+ can use a valid CHECK constraint to validate that no values of a column are null, avoiding
+  # a full table scan while holding an exclusive lock on the table when altering a column to NOT NULL
+  #
+  # Source:
+  # https://dba.stackexchange.com/questions/267947/how-can-i-set-a-column-to-not-null-without-locking-the-table-during-a-table-scan/268128#268128
+  # (https://archive.is/X55up)
+  def safe_make_column_not_nullable(table, column)
+    if ActiveRecord::Base.connection.postgresql_version < 12_00_00
+      raise PgHaMigrations::InvalidMigrationError, "Cannot safely make a column non-nullable before Postgres 12"
+    end
+
+    validated_table = PgHaMigrations::Table.from_table_name(table)
+    tmp_constraint_name = "tmp_not_null_constraint_#{OpenSSL::Digest::SHA256.hexdigest(column.to_s).first(7)}"
+
+    if validated_table.check_constraints.any? { |c| c.name == tmp_constraint_name }
+      raise PgHaMigrations::InvalidMigrationError, "A constraint #{tmp_constraint_name.inspect} already exists. " \
+        "This implies that a previous invocation of this method failed and left behind a temporary constraint. " \
+        "Please drop the constraint before attempting to run this method again."
+    end
+
+    safe_add_unvalidated_check_constraint(table, "#{connection.quote_column_name(column)} IS NOT NULL", name: tmp_constraint_name)
+    safe_validate_check_constraint(table, name: tmp_constraint_name)
+
+    # "Ordinarily this is checked during the ALTER TABLE by scanning the entire table; however, if a
+    # valid CHECK constraint is found which proves no NULL can exist, then the table scan is
+    # skipped."
+    # See: https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-DESC-SET-DROP-NOT-NULL
+    unsafe_make_column_not_nullable(table, column)
+    unsafe_remove_constraint(table, name: tmp_constraint_name)
+  end
+
+  # This method is a variant of `safe_make_column_not_nullable` that is expected to always be fast;
+  # i.e., it will not perform a full table scan to check for null values.
+  def safe_make_column_not_nullable_from_check_constraint(table, column, constraint_name:, drop_constraint: true)
+    unless ActiveRecord::Base.connection.postgresql_version >= 12_00_00
+      raise PgHaMigrations::InvalidMigrationError, "Cannot safely make a column non-nullable before Postgres 12"
+    end
+
+    unless constraint_name
+      raise ArgumentError, "Expected <constraint_name> to be present"
+    end
+    constraint_name = constraint_name.to_s
+
+    quoted_table_name = connection.quote_table_name(table)
+    quoted_column_name = connection.quote_column_name(column)
+
+    validated_table = PgHaMigrations::Table.from_table_name(table)
+    constraint = validated_table.check_constraints.find do |c|
+      c.name == constraint_name
+    end
+
+    unless constraint
+      raise PgHaMigrations::InvalidMigrationError, "The provided constraint does not exist"
+    end
+
+    unless constraint.validated
+      raise PgHaMigrations::InvalidMigrationError, "The provided constraint is not validated"
+    end
+
+    # The constraint has to actually prove that no null values exist, so the
+    # constraint condition can't simply include the `IS NOT NULL` check. We
+    # don't try to handle all possible cases here. For example,
+    # `a IS NOT NULL AND b IS NOT NULL` would prove what we need, but it would
+    # be complicated to check. We must ensure, however, that we're not too
+    # loose. For example, `a IS NOT NULL OR b IS NOT NULL` would not prove that
+    # `a IS NOT NULL`.
+    unless constraint.definition =~ /\ACHECK \(*(#{Regexp.escape(column.to_s)}|#{Regexp.escape(quoted_column_name)}) IS NOT NULL\)*\Z/i
+      raise PgHaMigrations::InvalidMigrationError, "The provided constraint does not enforce non-null values for the column"
+    end
+
+    # We don't want to acquire an exclusive lock on the table twice, and we also don't want it to be
+    # possible to have the NOT NULL constraint addition succeed while the constraint removal fails,
+    # so we acquire the lock once and do both operations in the same block.
     safely_acquire_lock_for_table(table) do
-      unsafe_execute "ALTER TABLE #{table} ALTER COLUMN #{column} SET NOT NULL"
+      # "Ordinarily this is checked during the ALTER TABLE by scanning the entire table; however, if a
+      # valid CHECK constraint is found which proves no NULL can exist, then the table scan is
+      # skipped."
+      # See: https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-DESC-SET-DROP-NOT-NULL
+      unsafe_make_column_not_nullable(table, column)
+
+      if drop_constraint
+        unsafe_remove_constraint(table, name: constraint_name)
+      end
+    end
+  end
+
+  def safe_add_index_on_empty_table(table, columns, **options)
+    if options[:algorithm] == :concurrently
+      raise ArgumentError, "Cannot call safe_add_index_on_empty_table with :algorithm => :concurrently"
+    end
+
+    # Check if nulls_not_distinct was provided but PostgreSQL version doesn't support it
+    if options[:nulls_not_distinct] && ActiveRecord::Base.connection.postgresql_version < 15_00_00
+      raise PgHaMigrations::InvalidMigrationError, "nulls_not_distinct option requires PostgreSQL 15 or higher"
+    end
+
+    # Avoids taking out an unnecessary SHARE lock if the table does have data
+    ensure_small_table!(table, empty: true)
+
+    safely_acquire_lock_for_table(table, mode: :share) do
+      # Ensure data wasn't written in the split second after the first check
+      ensure_small_table!(table, empty: true)
+
+      unsafe_add_index(table, columns, **options)
     end
   end

-  def safe_add_concurrent_index(table, columns, options={})
+  def safe_add_concurrent_index(table, columns, **options)
+    # Check if nulls_not_distinct was provided but PostgreSQL version doesn't support it
+    if options[:nulls_not_distinct] && ActiveRecord::Base.connection.postgresql_version < 15_00_00
+      raise PgHaMigrations::InvalidMigrationError, "nulls_not_distinct option requires PostgreSQL 15 or higher"
+    end
+
     unsafe_add_index(table, columns, **options.merge(:algorithm => :concurrently))
   end

-  def safe_remove_concurrent_index(table, options={})
+  def safe_remove_concurrent_index(table, **options)
     unless options.is_a?(Hash) && options.key?(:name)
       raise ArgumentError, "Expected safe_remove_concurrent_index to be called with arguments (table_name, :name => ...)"
     end
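
The new methods above avoid holding an ACCESS EXCLUSIVE lock through a full table scan: safe_make_column_not_nullable adds and validates a temporary CHECK constraint so the final SET NOT NULL can skip the scan (Postgres 12+), safe_make_column_not_nullable_from_check_constraint reuses an existing validated constraint, and safe_add_index_on_empty_table builds a plain index under a SHARE lock after verifying the table is empty. An illustrative sketch (table, column, and constraint names are hypothetical):

    safe_make_column_not_nullable :users, :email

    # if a validated CHECK (email IS NOT NULL) constraint already exists:
    safe_make_column_not_nullable_from_check_constraint :users, :email,
      constraint_name: "users_email_not_null"

    # brand-new, still-empty table:
    safe_add_index_on_empty_table :widgets, :name, unique: true
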
@@ -170,8 +255,96 @@ module PgHaMigrations::SafeStatements
     unsafe_remove_index(table, **options.merge(:algorithm => :concurrently))
   end

+  def safe_add_concurrent_partitioned_index(
+    table,
+    columns,
+    name: nil,
+    if_not_exists: nil,
+    using: nil,
+    unique: nil,
+    where: nil,
+    comment: nil,
+    nulls_not_distinct: nil
+  )
+    # Check if nulls_not_distinct was provided but PostgreSQL version doesn't support it
+    if !nulls_not_distinct.nil? && ActiveRecord::Base.connection.postgresql_version < 15_00_00
+      raise PgHaMigrations::InvalidMigrationError, "nulls_not_distinct option requires PostgreSQL 15 or higher"
+    end
+
+    if ActiveRecord::Base.connection.postgresql_version < 11_00_00
+      raise PgHaMigrations::InvalidMigrationError, "Concurrent partitioned index creation not supported on Postgres databases before version 11"
+    end
+
+    parent_table = PgHaMigrations::Table.from_table_name(table)
+
+    raise PgHaMigrations::InvalidMigrationError, "Table #{parent_table.inspect} is not a partitioned table" unless parent_table.natively_partitioned?
+
+    parent_index = if name.present?
+      PgHaMigrations::Index.new(name, parent_table)
+    else
+      PgHaMigrations::Index.from_table_and_columns(parent_table, columns)
+    end
+
+    # Short-circuit when if_not_exists: true and index already valid
+    return if if_not_exists && parent_index.valid?
+
+    child_indexes = parent_table.partitions.map do |child_table|
+      PgHaMigrations::Index.from_table_and_columns(child_table, columns)
+    end
+
+    # CREATE INDEX ON ONLY parent_table
+    unsafe_add_index(
+      parent_table.fully_qualified_name,
+      columns,
+      name: parent_index.name,
+      if_not_exists: if_not_exists,
+      using: using,
+      unique: unique,
+      nulls_not_distinct: nulls_not_distinct,
+      where: where,
+      comment: comment,
+      algorithm: :only, # see lib/pg_ha_migrations/hacks/add_index_on_only.rb
+    )
+
+    child_indexes.each do |child_index|
+      add_index_method = if child_index.table.natively_partitioned?
+        :safe_add_concurrent_partitioned_index
+      else
+        :safe_add_concurrent_index
+      end
+
+      send(
+        add_index_method,
+        child_index.table.fully_qualified_name,
+        columns,
+        name: child_index.name,
+        if_not_exists: if_not_exists,
+        using: using,
+        unique: unique,
+        nulls_not_distinct: nulls_not_distinct,
+        where: where,
+      )
+    end
+
+    # Avoid taking out an unnecessary lock if there are no child tables to attach
+    if child_indexes.present?
+      safely_acquire_lock_for_table(parent_table.fully_qualified_name) do
+        child_indexes.each do |child_index|
+          say_with_time "Attaching index #{child_index.inspect} to #{parent_index.inspect}" do
+            connection.execute(<<~SQL)
+              ALTER INDEX #{parent_index.fully_qualified_name}
+              ATTACH PARTITION #{child_index.fully_qualified_name}
+            SQL
+          end
+        end
+      end
+    end
+
+    raise PgHaMigrations::InvalidMigrationError, "Unexpected state. Parent index #{parent_index.inspect} is invalid" unless parent_index.valid?
+  end
+
   def safe_set_maintenance_work_mem_gb(gigabytes)
-    unsafe_execute("SET maintenance_work_mem = '#{PG::Connection.escape_string(gigabytes.to_s)} GB'")
+    raw_execute("SET maintenance_work_mem = '#{PG::Connection.escape_string(gigabytes.to_s)} GB'")
   end

   def safe_add_unvalidated_check_constraint(table, expression, name:)
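
safe_add_concurrent_partitioned_index creates the parent index with ONLY (which is cheap on the parent), builds each child index concurrently (recursing into sub-partitions), and then attaches the children under a single lock. A rough usage sketch (table and column names are hypothetical):

    safe_add_concurrent_partitioned_index :events, [:tenant_id, :created_at],
      name: "index_events_on_tenant_id_and_created_at"
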
@@ -206,25 +379,11 @@ module PgHaMigrations::SafeStatements
     end
   end

-  def unsafe_remove_constraint(table, name:)
-    raise ArgumentError, "Expected <name> to be present" unless name.present?
-
-    quoted_table_name = connection.quote_table_name(table)
-    quoted_constraint_name = connection.quote_table_name(name)
-    sql = "ALTER TABLE #{quoted_table_name} DROP CONSTRAINT #{quoted_constraint_name}"
-
-    safely_acquire_lock_for_table(table) do
-      say_with_time "remove_constraint(#{table.inspect}, name: #{name.inspect})" do
-        connection.execute(sql)
-      end
-    end
-  end
-
   def safe_create_partitioned_table(table, partition_key:, type:, infer_primary_key: nil, **options, &block)
     raise ArgumentError, "Expected <partition_key> to be present" unless partition_key.present?

-    unless PARTITION_TYPES.include?(type)
-      raise ArgumentError, "Expected <type> to be symbol in #{PARTITION_TYPES} but received #{type.inspect}"
+    unless PgHaMigrations::PARTITION_TYPES.include?(type)
+      raise ArgumentError, "Expected <type> to be symbol in #{PgHaMigrations::PARTITION_TYPES} but received #{type.inspect}"
     end

     if ActiveRecord::Base.connection.postgresql_version < 10_00_00
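
PARTITION_TYPES now lives on the PgHaMigrations namespace rather than on this module, so the supported values (:range, :list, :hash) are unchanged for callers. For reference, a call might look like this sketch (table and columns are hypothetical):

    safe_create_partitioned_table :events, type: :range, partition_key: :created_at do |t|
      t.datetime :created_at, null: false
      t.text :payload
    end
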
@@ -261,7 +420,7 @@ module PgHaMigrations::SafeStatements

     options[:options] = "PARTITION BY #{type.upcase} (#{quoted_partition_key})"

-    safe_create_table(table, options) do |td|
+    safe_create_table(table, **options) do |td|
       block.call(td) if block

       next unless options[:id]
@@ -280,15 +439,7 @@ module PgHaMigrations::SafeStatements
     end
   end

-  def safe_partman_create_parent(table, **options)
-    if options[:retention].present? || options[:retention_keep_table] == false
-      raise PgHaMigrations::UnsafeMigrationError.new(":retention and/or :retention_keep_table => false can potentially result in data loss if misconfigured. Please use unsafe_partman_create_parent if you want to set these options")
-    end
-
-    unsafe_partman_create_parent(table, **options)
-  end
-
-  def unsafe_partman_create_parent(
+  def safe_partman_create_parent(
     table,
     partition_key:,
     interval:,
@@ -351,7 +502,7 @@ module PgHaMigrations::SafeStatements
       retention_keep_table: retention_keep_table,
     }.compact

-    unsafe_partman_update_config(create_parent_options[:parent_table], **update_config_options)
+    unsafe_partman_update_config(table, **update_config_options)
   end

   def safe_partman_update_config(table, **options)
@@ -362,26 +513,6 @@ module PgHaMigrations::SafeStatements
     unsafe_partman_update_config(table, **options)
   end

-  def unsafe_partman_update_config(table, **options)
-    invalid_options = options.keys - PARTMAN_UPDATE_CONFIG_OPTIONS
-
-    raise ArgumentError, "Unrecognized argument(s): #{invalid_options}" unless invalid_options.empty?
-
-    PgHaMigrations::PartmanConfig.schema = _quoted_partman_schema
-
-    config = PgHaMigrations::PartmanConfig.find(_fully_qualified_table_name_for_partman(table))
-
-    config.assign_attributes(**options)
-
-    inherit_privileges_changed = config.inherit_privileges_changed?
-
-    say_with_time "partman_update_config(#{table.inspect}, #{options.map { |k,v| "#{k}: #{v.inspect}" }.join(", ")})" do
-      config.save!
-    end
-
-    safe_partman_reapply_privileges(table) if inherit_privileges_changed
-  end
-
   def safe_partman_reapply_privileges(table)
     say_with_time "partman_reapply_privileges(#{table.inspect})" do
       connection.execute("SELECT #{_quoted_partman_schema}.reapply_privileges('#{_fully_qualified_table_name_for_partman(table)}')")
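
safe_partman_create_parent is now the full implementation rather than a thin wrapper over an unsafe variant. Assuming pg_partman is installed, a call might look like the following sketch (table name and interval value are illustrative; the accepted interval format depends on the pg_partman version in use):

    safe_partman_create_parent :events, partition_key: :created_at, interval: "monthly"
    safe_partman_update_config :events, premake: 10, inherit_privileges: true
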
@@ -402,38 +533,13 @@ module PgHaMigrations::SafeStatements
   end

   def _fully_qualified_table_name_for_partman(table)
-    identifiers = table.to_s.split(".")
-
-    raise PgHaMigrations::InvalidMigrationError, "Expected table to be in the format <table> or <schema>.<table> but received #{table}" if identifiers.size > 2
-
-    identifiers.each { |identifier| _validate_partman_identifier(identifier) }
-
-    schema_conditional = if identifiers.size > 1
-      "'#{identifiers.first}'"
-    else
-      "ANY (current_schemas(false))"
-    end
-
-    schema = connection.select_value(<<~SQL)
-      SELECT schemaname
-      FROM pg_tables
-      WHERE tablename = '#{identifiers.last}' AND schemaname = #{schema_conditional}
-      ORDER BY array_position(current_schemas(false), schemaname)
-      LIMIT 1
-    SQL
-
-    raise PgHaMigrations::InvalidMigrationError, "Could not find table #{table}" unless schema.present?
-
-    _validate_partman_identifier(schema)
-
-    # Quoting is unneeded since _validate_partman_identifier ensures the schema / table use standard naming conventions
-    "#{schema}.#{identifiers.last}"
-  end
+    table = PgHaMigrations::Table.from_table_name(table)

-  def _validate_partman_identifier(identifier)
-    if identifier.to_s !~ /^[a-z_][a-z_\d]*$/
-      raise PgHaMigrations::InvalidMigrationError, "Partman requires schema / table names to be lowercase with underscores"
-    end
+    [table.schema, table.name].each do |identifier|
+      if identifier.to_s !~ /^[a-z_][a-z_\d]*$/
+        raise PgHaMigrations::InvalidMigrationError, "Partman requires schema / table names to be lowercase with underscores"
+      end
+    end.join(".")
   end

   def _per_migration_caller
@@ -463,18 +569,50 @@ module PgHaMigrations::SafeStatements
     super(conn, direction)
   end

-  def safely_acquire_lock_for_table(table, &block)
+  def safely_acquire_lock_for_table(*tables, mode: :access_exclusive, &block)
     _check_postgres_adapter!
-    table = table.to_s
-    quoted_table_name = connection.quote_table_name(table)
+
+    target_tables = PgHaMigrations::TableCollection.from_table_names(tables, mode)
+
+    if @parent_lock_tables
+      if !target_tables.subset?(@parent_lock_tables)
+        raise PgHaMigrations::InvalidMigrationError,
+          "Nested lock detected! Cannot acquire lock on #{target_tables.to_sql} " \
+          "while #{@parent_lock_tables.to_sql} is locked."
+      end
+
+      if @parent_lock_tables.mode < target_tables.mode
+        raise PgHaMigrations::InvalidMigrationError,
+          "Lock escalation detected! Cannot change lock level from :#{@parent_lock_tables.mode} " \
+          "to :#{target_tables.mode} for #{target_tables.to_sql}."
+      end
+
+      # If in a nested context and all of the above checks have passed,
+      # we have already acquired the lock (or a lock at a higher level),
+      # and can simply execute the block and short-circuit.
+      block.call
+
+      return
+    end

     successfully_acquired_lock = false

     until successfully_acquired_lock
-      while (
+      loop do
         blocking_transactions = PgHaMigrations::BlockingDatabaseTransactions.find_blocking_transactions("#{PgHaMigrations::LOCK_TIMEOUT_SECONDS} seconds")
-        blocking_transactions.any? { |query| query.tables_with_locks.include?(table) }
-      )
+
+        # Locking a partitioned table will also lock child tables (including sub-partitions),
+        # so we need to check for blocking queries on those tables as well
+        target_tables_with_partitions = target_tables.with_partitions
+
+        break unless blocking_transactions.any? do |query|
+          query.tables_with_locks.any? do |locked_table|
+            target_tables_with_partitions.any? do |target_table|
+              target_table.conflicts_with?(locked_table)
+            end
+          end
+        end
+
         say "Waiting on blocking transactions:"
         blocking_transactions.each do |blocking_transaction|
           say blocking_transaction.description
@@ -483,16 +621,21 @@ module PgHaMigrations::SafeStatements
       end

       connection.transaction do
-        adjust_timeout_method = connection.postgresql_version >= 9_03_00 ? :adjust_lock_timeout : :adjust_statement_timeout
         begin
-          method(adjust_timeout_method).call(PgHaMigrations::LOCK_TIMEOUT_SECONDS) do
-            connection.execute("LOCK #{quoted_table_name};")
+          # A lock timeout would apply to each individual table in the query,
+          # so we made a conscious decision to use a statement timeout here
+          # to keep behavior consistent in a multi-table lock scenario.
+          adjust_statement_timeout(PgHaMigrations::LOCK_TIMEOUT_SECONDS) do
+            connection.execute("LOCK #{target_tables.to_sql} IN #{target_tables.mode.to_sql} MODE;")
          end
          successfully_acquired_lock = true
        rescue ActiveRecord::StatementInvalid => e
+          # It is still possible to hit a lock timeout if the session has
+          # that value set to something less than LOCK_TIMEOUT_SECONDS.
+          # We should retry when either of these exceptions are raised.
          if e.message =~ /PG::LockNotAvailable.+ lock timeout/ || e.message =~ /PG::QueryCanceled.+ statement timeout/
            sleep_seconds = PgHaMigrations::LOCK_FAILURE_RETRY_DELAY_MULTLIPLIER * PgHaMigrations::LOCK_TIMEOUT_SECONDS
-            say "Timed out trying to acquire an exclusive lock on the #{quoted_table_name} table."
+            say "Timed out trying to acquire #{target_tables.mode.to_sql} lock on #{target_tables.to_sql}."
            say "Sleeping for #{sleep_seconds}s to allow potentially queued up queries to finish before continuing."
            sleep(sleep_seconds)

@@ -503,7 +646,13 @@ module PgHaMigrations::SafeStatements
        end

        if successfully_acquired_lock
-          block.call
+          @parent_lock_tables = target_tables
+
+          begin
+            block.call
+          ensure
+            @parent_lock_tables = nil
+          end
        end
      end
    end
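
safely_acquire_lock_for_table now accepts multiple tables and an explicit lock mode, and remembers the outer lock via @parent_lock_tables so nested calls are only permitted for a subset of the already-locked tables at the same or weaker lock level. A sketch of the intended usage (table names hypothetical):

    safely_acquire_lock_for_table(:users, :posts, mode: :share) do
      # both tables (and their partitions, if any) are locked in SHARE mode
      safely_acquire_lock_for_table(:users, mode: :share) do
        # allowed: subset of the outer tables, same lock level
      end
      # requesting :access_exclusive here instead would raise the
      # "Lock escalation detected!" error shown above
    end
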
@@ -548,4 +697,16 @@ module PgHaMigrations::SafeStatements
       end
     end
   end
+
+  def ensure_small_table!(table, empty: false, threshold: PgHaMigrations::SMALL_TABLE_THRESHOLD_BYTES)
+    table = PgHaMigrations::Table.from_table_name(table)
+
+    if empty && table.has_rows?
+      raise PgHaMigrations::InvalidMigrationError, "Table #{table.inspect} has rows"
+    end
+
+    if table.total_bytes > threshold
+      raise PgHaMigrations::InvalidMigrationError, "Table #{table.inspect} is larger than #{threshold} bytes"
+    end
+  end
 end
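
ensure_small_table! is the guard used by safe_add_index_on_empty_table above, and it can also be called on its own. A sketch (table name hypothetical; the default size limit comes from PgHaMigrations::SMALL_TABLE_THRESHOLD_BYTES):

    ensure_small_table!(:feature_flags)              # raises if the table exceeds the size threshold
    ensure_small_table!(:feature_flags, empty: true) # additionally raises if it has any rows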