pg_ha_migrations 1.7.0 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/ci.yml +8 -8
- data/.ruby-version +1 -1
- data/Appraisals +8 -4
- data/Dockerfile +2 -2
- data/Gemfile +0 -1
- data/README.md +179 -44
- data/Rakefile +2 -0
- data/bin/setup +3 -1
- data/docker-compose.yml +1 -1
- data/gemfiles/{rails_6.1.gemfile → rails_7.1.gemfile} +1 -1
- data/gemfiles/{rails_7.0.gemfile → rails_7.2.gemfile} +1 -1
- data/gemfiles/rails_8.0.gemfile +7 -0
- data/lib/pg_ha_migrations/allowed_versions.rb +1 -1
- data/lib/pg_ha_migrations/blocking_database_transactions.rb +10 -5
- data/lib/pg_ha_migrations/constraint.rb +1 -0
- data/lib/pg_ha_migrations/hacks/add_index_on_only.rb +30 -0
- data/lib/pg_ha_migrations/hacks/disable_ddl_transaction.rb +0 -1
- data/lib/pg_ha_migrations/lock_mode.rb +112 -0
- data/lib/pg_ha_migrations/relation.rb +224 -0
- data/lib/pg_ha_migrations/safe_statements.rb +288 -127
- data/lib/pg_ha_migrations/unsafe_statements.rb +159 -31
- data/lib/pg_ha_migrations/version.rb +1 -1
- data/lib/pg_ha_migrations.rb +22 -1
- data/pg_ha_migrations.gemspec +3 -3
- metadata +18 -16
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: cd0329ccdae5b3bf68ac759bc0eca2dbc5089c3d91f9ee6444c6791a2bd42e93
+  data.tar.gz: ecf840233b36ede3ef278411bac124cefa32036b43d62119792a8d74ac126b5e
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f744006470a25bff85e10026f750e1cada2b49a19809cc2279208f9ec10ac86bfacf38dbbe32bd839f655475d0dde773d1c219b584c4a5cccc7c911f570c10bd
+  data.tar.gz: 5626ac149515ef764e63797cceed133c87d6032f22100a4940573a42a0b1ae483cbebc8a26596cc77494c4420e94e55d08e74b44ebe6fb83cd7f18e1a502d731
data/.github/workflows/ci.yml
CHANGED
@@ -5,18 +5,18 @@ jobs:
     strategy:
       matrix:
         pg:
-          - 11
-          - 12
           - 13
           - 14
           - 15
+          - 16
         ruby:
-          - 3.
-          - 3.
-          - 3.
+          - "3.2"
+          - "3.3"
+          - "3.4"
        gemfile:
-          -
-          - rails_7.
+          - rails_7.1
+          - rails_7.2
+          - rails_8.0
     name: PostgreSQL ${{ matrix.pg }} - Ruby ${{ matrix.ruby }} - ${{ matrix.gemfile }}
     runs-on: ubuntu-latest
     env: # $BUNDLE_GEMFILE must be set at the job level, so it is set for all steps
@@ -25,7 +25,7 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Build postgres image and start the container
-        run: docker
+        run: docker compose up -d --build
        env:
          PGVERSION: ${{ matrix.pg }}
      - name: Setup Ruby using .ruby-version file
data/.ruby-version
CHANGED
@@ -1 +1 @@
-ruby-3.
+ruby-3.4.2
data/Appraisals
CHANGED
@@ -1,7 +1,11 @@
-appraise "rails-
-  gem "rails", "
+appraise "rails-7.1" do
+  gem "rails", "~> 7.1.0"
 end
 
-appraise "rails-7.
-  gem "rails", "7.0
+appraise "rails-7.2" do
+  gem "rails", "~> 7.2.0"
+end
+
+appraise "rails-8.0" do
+  gem "rails", "~> 8.0.0"
 end
data/Dockerfile
CHANGED
@@ -6,6 +6,6 @@ RUN apt-get update && apt-get install -y curl ca-certificates gnupg lsb-release
 
 RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor | tee /etc/apt/trusted.gpg.d/apt.postgresql.org.gpg >/dev/null
 
-RUN echo "deb
+RUN echo "deb https://apt-archive.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg-archive main" > /etc/apt/sources.list.d/pgdg.list
 
-RUN apt update && apt-get install -y postgresql-$PG_MAJOR-partman
+RUN apt update && apt-get install -y postgresql-$PG_MAJOR-partman=4.7.4-2.pgdg110+1
data/Gemfile
CHANGED
data/README.md
CHANGED
@@ -30,7 +30,36 @@ Or install it yourself as:
 
     $ gem install pg_ha_migrations
 
-##
+## Migration Safety
+
+There are two major classes of concerns we try to handle in the API:
+
+- Database safety (e.g., long-held locks)
+- Application safety (e.g., dropping columns the app uses)
+
+### Migration Method Renaming
+
+We rename migration methods with prefixes to explicitly denote their safety level:
+
+- `safe_*`: These methods check for both application and database safety concerns, prefer concurrent operations where available, set low lock timeouts where appropriate, and decompose operations into multiple safe steps.
+- `unsafe_*`: Using these methods is a signal that the DDL operation is not necessarily safe for a running application. They include basic safety features like safe lock acquisition and dependent object checking, but otherwise dispatch directly to the native ActiveRecord migration method.
+- `raw_*`: These methods are a direct dispatch to the native ActiveRecord migration method.
+
+Calling the original migration methods without a prefix will raise an error.
+
+The API is designed to be explicit yet remain flexible. There may be situations where invoking the `unsafe_*` method is preferred (or the only option available for definitionally unsafe operations).
+
+While `unsafe_*` methods were historically (before 2.0) pure wrappers for invoking the native ActiveRecord migration method, there is a class of problems we can't handle easily without bending that design rule a bit. For example, dropping a column is unsafe from an application perspective, so we make the application safety concerns explicit by using an `unsafe_` prefix. Using `unsafe_remove_column` calls out the need to audit the application to confirm the migration won't break it. Because there is no safe alternative we don't define a `safe_remove_column` analogue. However, there are still conditions we'd like to assert before dropping a column. For example, dropping an unused column that's referenced by one or more indexes may be safe from an application perspective, but the cascading drop of those dependent indexes won't use a `CONCURRENT` operation and is therefore unsafe from a database perspective.
+
+For `unsafe_*` migration methods which support checks of this type you can bypass the checks by passing an `:allow_dependent_objects` key in the method's `options` hash containing an array of dependent object types you'd like to allow. These checks will run by default, but you can opt out by setting `config.check_for_dependent_objects = false` [in your configuration initializer](#configuration).
+
+### Disallowed Migration Methods
+
+We disallow the use of `unsafe_change_table`, as the equivalent operation can be composed with explicit `safe_*` / `unsafe_*` methods. If you _must_ use `change_table`, it is still available as `raw_change_table`.
+
+### Migration Method Arguments
+
+We believe the `force: true` option to ActiveRecord's `create_table` method is always unsafe because it's not possible to denote exactly how the current state will change. Therefore we disallow using `force: true` even when calling `unsafe_create_table`. This check is enabled by default, but you can opt out by setting `config.allow_force_create_table = true` [in your configuration initializer](#configuration).
 
 ### Rollback
 
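To make the renamed API above concrete, here is a minimal, hypothetical migration sketch (not taken from the gem's docs). The class, table, and column names are invented, and the `:indexes` entry passed to `allow_dependent_objects` is an assumption about the dependent object types the checks recognize:

```ruby
# Hypothetical sketch of the prefixed API described in this hunk.
class RemoveLegacyTokenFromUsers < ActiveRecord::Migration[7.1]
  def up
    # Dropping a column is definitionally unsafe for the application, so only an
    # unsafe_* variant exists. The dependent-object check would normally block the
    # drop while an index still references the column; here we explicitly allow it.
    # (:indexes is an assumed value -- consult the gem's docs for the exact keys.)
    unsafe_remove_column :users, :legacy_token, allow_dependent_objects: [:indexes]
  end
end
```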
@@ -46,36 +75,37 @@ end
 
 and never use `def change`. We believe that this is the only safe approach in production environments. For development environments we iterate by recreating the database from scratch every time we make a change.
 
-###
-
-There are two major classes of concerns we try to handle in the API:
-
-- Database safety (e.g., long-held locks)
-- Application safety (e.g., dropping columns the app uses)
-
-We rename migration methods with prefixes denoting their safety level:
-
-- `safe_*`: These methods check for both application and database safety concerns, prefer concurrent operations where available, set low lock timeouts where appropriate, and decompose operations into multiple safe steps.
-- `unsafe_*`: These methods are generally a direct dispatch to the native ActiveRecord migration method.
+### Transactional DDL
 
-
+Individual DDL statements in PostgreSQL are transactional by default (as are all Postgres statements). Concurrent index creation and removal are two exceptions: these utility commands manage their own transaction state (and each uses multiple transactions to achieve the desired concurrency).
 
-
+We [disable ActiveRecord's DDL transactions](./lib/pg_ha_migrations/hacks/disable_ddl_transaction.rb) (which wrap the entire migration file in a transaction) by default for the following reasons:
 
-
+* [Running multiple DDL statements inside a transaction acquires exclusive locks on all of the modified objects](https://medium.com/paypal-tech/postgresql-at-scale-database-schema-changes-without-downtime-20d3749ed680#cc22).
+* Acquired locks are held until the end of the transaction.
+* Multiple locks create the possibility of deadlocks.
+* Increased exposure to long waits:
+  * Each newly acquired lock has its own timeout applied (so total lock time is additive).
+  * [Safe lock acquisition](#safely_acquire_lock_for_table) (which is used in each migration method where locks will be acquired) can issue multiple lock attempts on lock timeouts (with sleep delays between attempts).
 
-
+Because of the above issues, attempting to re-enable transactional migrations forfeits many of the safety guarantees this library provides and may even break certain functionality. If you'd like to experiment with it anyway, you can re-enable transactional migrations by adding `self.disable_ddl_transaction = false` to your migration class definition.
 
-
+## Usage
 
-
+### Unsupported ActiveRecord Features
 
 The following functionality is currently unsupported:
 
--
+- [Rollback methods in migrations](#rollback)
 - Generators
 - schema.rb
 
+### Compatibility Notes
+
+- While some features may work with other versions, this gem is currently tested against PostgreSQL 13+ and Partman 4.x.
+
+### Migration Methods
+
 #### safe\_create\_table
 
 Safely creates a new table.
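As an illustration of the opt-out mentioned at the end of this hunk, a sketch of a migration class that re-enables the wrapping DDL transaction (generally discouraged for the reasons listed above). The class and table names are invented, and `safe_create_table` is assumed to accept the usual `create_table` block:

```ruby
# Sketch only: re-enabling ActiveRecord's wrapping DDL transaction for one migration.
class ExampleTransactionalMigration < ActiveRecord::Migration[7.1]
  # pg_ha_migrations disables this by default; opting back in forfeits several safety guarantees.
  self.disable_ddl_transaction = false

  def up
    safe_create_table :example_widgets do |t|
      t.text :name
    end
  end
end
```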
@@ -148,7 +178,7 @@ safe_change_column_default :table, :column, -> { "NOW()" }
 safe_change_column_default :table, :column, -> { "'NOW()'" }
 ```
 
-Note: On Postgres 11+ adding a column with a constant default value does not rewrite or scan the table (under a lock or otherwise). In that case a migration adding a column with a default should do so in a single operation rather than the two-step `safe_add_column` followed by `safe_change_column_default`. We enforce this best practice with the error `PgHaMigrations::BestPracticeError`, but if your prefer otherwise (or are running in a mixed Postgres version environment), you may opt out by setting `config.prefer_single_step_column_addition_with_default =
+Note: On Postgres 11+ adding a column with a constant default value does not rewrite or scan the table (under a lock or otherwise). In that case a migration adding a column with a default should do so in a single operation rather than the two-step `safe_add_column` followed by `safe_change_column_default`. We enforce this best practice with the error `PgHaMigrations::BestPracticeError`, but if you prefer otherwise (or are running in a mixed Postgres version environment), you may opt out by setting `config.prefer_single_step_column_addition_with_default = false` [in your configuration initializer](#configuration).
 
 #### safe\_make\_column\_nullable
 
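For contrast with the note above, a hedged sketch of the two-step form the gem rejects and the single-step form it prefers. It assumes `safe_add_column` accepts a constant `default:` on Postgres 11+, which the note implies but does not spell out here; verify against the `safe_add_column` section of the README. Table and column names are illustrative:

```ruby
# Two-step form: raises PgHaMigrations::BestPracticeError under the default configuration.
safe_add_column :users, :locale, :text
safe_change_column_default :users, :locale, "en"

# Preferred single step on Postgres 11+ (assumes safe_add_column accepts default:).
safe_add_column :users, :locale, :text, default: "en"
```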
@@ -157,6 +187,19 @@ Safely make the column nullable.
 ```ruby
 safe_make_column_nullable :table, :column
 ```
+#### safe\_make\_column\_not\_nullable
+
+Safely make the column not nullable: adds a temporary constraint and uses that constraint to validate that no values are null before altering the column, then removes the temporary constraint.
+
+```ruby
+safe_make_column_not_nullable :table, :column
+```
+
+> **Note:**
+> - This method performs a full table scan to validate that no NULL values exist in the column. While no exclusive lock is held for this scan, on large tables the scan may take a long time.
+> - The method runs multiple DDL statements non-transactionally. Validating the constraint can fail. In such cases an exception will be raised, and an INVALID constraint will be left on the table.
+
+If you want to avoid a full table scan and have already added and validated a suitable CHECK constraint, consider using [`safe_make_column_not_nullable_from_check_constraint`](#safe_make_column_not_nullable_from_check_constraint) instead.
 
 #### unsafe\_make\_column\_not\_nullable
 
@@ -166,6 +209,31 @@ Unsafely make a column not nullable.
 unsafe_make_column_not_nullable :table, :column
 ```
 
+#### safe\_make\_column\_not\_nullable\_from\_check\_constraint
+
+Variant of `safe_make_column_not_nullable` that safely makes a column NOT NULL using an existing validated CHECK constraint that enforces non-null values for the column. This method is expected to always be fast because it avoids a full table scan.
+
+```ruby
+safe_make_column_not_nullable_from_check_constraint :table, :column, constraint_name: :constraint_name
+```
+
+- `constraint_name` (required): The name of a validated CHECK constraint that enforces `column IS NOT NULL`.
+- `drop_constraint:` (optional, default: true): Whether to drop the constraint after making the column NOT NULL.
+
+You should use [`safe_make_column_not_nullable`](#safe_make_column_not_nullable) when neither a CHECK constraint nor a NOT NULL constraint already exists. You should use this method when you already have an equivalent CHECK constraint on the table.
+
+This method will raise an error if the constraint does not exist, is not validated, or does not strictly enforce non-null values for the column.
+
+> **Note:** We do not attempt to catch all possible proofs of `column IS NOT NULL` by means of an existing constraint; only a constraint with the exact definition `column IS NOT NULL` will be recognized.
+
+#### safe\_add\_index\_on\_empty\_table
+
+Safely add an index on a table with zero rows. This will raise an error if the table contains data.
+
+```ruby
+safe_add_index_on_empty_table :table, :column
+```
+
 #### safe\_add\_concurrent\_index
 
 Add an index concurrently.
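Tying the NOT NULL variants above together, a sketch of the constraint-first path built only from methods documented in this README; the class, table, column, and constraint names are illustrative, and the exact argument forms should be checked against each method's own section:

```ruby
# Illustrative flow: make users.email NOT NULL without a scan under an exclusive lock.
class MakeUsersEmailNotNull < ActiveRecord::Migration[7.1]
  def up
    # Add the CHECK constraint without validating existing rows (fast).
    safe_add_unvalidated_check_constraint :users, "email IS NOT NULL", name: :users_email_not_null
    # Validate existing rows online (a full scan, but without holding an exclusive lock).
    safe_validate_check_constraint :users, name: :users_email_not_null
    # Use the validated constraint as proof; it is dropped afterwards by default.
    safe_make_column_not_nullable_from_check_constraint :users, :email, constraint_name: :users_email_not_null
  end
end
```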
@@ -188,6 +256,41 @@ Safely remove an index. Migrations that contain this statement must also include
 safe_remove_concurrent_index :table, :name => :index_name
 ```
 
+#### safe\_add\_concurrent\_partitioned\_index
+
+Add an index to a natively partitioned table concurrently, as described in the [table partitioning docs](https://www.postgresql.org/docs/current/ddl-partitioning.html):
+
+> To avoid long lock times, it is possible to use `CREATE INDEX ON ONLY` the partitioned table; such an index is marked invalid, and the partitions do not get the index applied automatically.
+> The indexes on partitions can be created individually using `CONCURRENTLY`, and then attached to the index on the parent using `ALTER INDEX .. ATTACH PARTITION`.
+> Once indexes for all partitions are attached to the parent index, the parent index is marked valid automatically.
+
+```ruby
+# Assuming this table has partitions child1 and child2, the following indexes will be created:
+# - index_partitioned_table_on_column
+# - index_child1_on_column (attached to index_partitioned_table_on_column)
+# - index_child2_on_column (attached to index_partitioned_table_on_column)
+safe_add_concurrent_partitioned_index :partitioned_table, :column
+```
+
+Add a composite index using the `hash` index type with custom name for the parent index when the parent table contains sub-partitions.
+
+```ruby
+# Assuming this table has partitions child1 and child2, and child1 has sub-partitions sub1 and sub2,
+# the following indexes will be created:
+# - custom_name_idx
+# - index_child1_on_column1_column2 (attached to custom_name_idx)
+# - index_sub1_on_column1_column2 (attached to index_child1_on_column1_column2)
+# - index_sub2_on_column1_column2 (attached to index_child1_on_column1_column2)
+# - index_child2_on_column1_column2 (attached to custom_name_idx)
+safe_add_concurrent_partitioned_index :partitioned_table, [:column1, :column2], name: "custom_name_idx", using: :hash
+```
+
+Note:
+
+This method runs multiple DDL statements non-transactionally.
+Creating or attaching an index on a child table could fail.
+In such cases an exception will be raised, and an `INVALID` index will be left on the parent table.
+
 #### safe\_add\_unvalidated\_check\_constraint
 
 Safely add a `CHECK` constraint. The constraint will not be immediately validated on existing rows to avoid a full table scan while holding an exclusive lock. After adding the constraint, you'll need to use `safe_validate_check_constraint` to validate existing rows.
@@ -320,23 +423,7 @@ safe_partman_create_parent :table,
   premake: 10,
   start_partition: Time.current + 1.month,
   infinite_time_partitions: false,
-  inherit_privileges: false
-```
-
-#### unsafe\_partman\_create\_parent
-
-We have chosen to flag the use of `retention` and `retention_keep_table` as an unsafe operation.
-While we recognize that these options are useful, we think they fit in the same category as `drop_table` and `rename_table`, and are therefore unsafe from an application perspective.
-If you wish to define these options, you must use this method.
-
-```ruby
-safe_create_partitioned_table :table, type: :range, partition_key: :created_at do |t|
-  t.timestamps null: false
-end
-
-unsafe_partman_create_parent :table,
-  partition_key: :created_at,
-  interval: "weekly",
+  inherit_privileges: false,
   retention: "60 days",
   retention_keep_table: false
 ```
@@ -344,7 +431,7 @@ unsafe_partman_create_parent :table,
 #### safe\_partman\_update\_config
 
 There are some partitioning options that cannot be set in the call to `create_parent` and are only available in the `part_config` table.
-As mentioned previously, you can specify these args in the call to `safe_partman_create_parent`
+As mentioned previously, you can specify these args in the call to `safe_partman_create_parent`, which will be delegated to this method.
 Calling this method directly will be useful if you need to modify your partitioned table after the fact.
 
 Allowed keyword args:
@@ -366,8 +453,9 @@ safe_partman_update_config :table,
 
 #### unsafe\_partman\_update\_config
 
-
-
+We have chosen to flag the use of `retention` and `retention_keep_table` as an unsafe operation.
+While we recognize that these options are useful, changing these values fits in the same category as `drop_table` and `rename_table`, and is therefore unsafe from an application perspective.
+If you wish to change these options, you must use this method.
 
 ```ruby
 unsafe_partman_update_config :table,
@@ -387,7 +475,13 @@ safe_partman_reapply_privileges :table
 
 #### safely\_acquire\_lock\_for\_table
 
-
+Acquires a lock (in `ACCESS EXCLUSIVE` mode by default) on a table using the following algorithm:
+
+1. Verify that no long-running queries are using the table.
+    - If long-running queries are currently using the table, sleep `PgHaMigrations::LOCK_TIMEOUT_SECONDS` and check again.
+2. If no long-running queries are currently using the table, optimistically attempt to lock the table (with a timeout of `PgHaMigrations::LOCK_TIMEOUT_SECONDS`).
+    - If the lock is not acquired, sleep `PgHaMigrations::LOCK_FAILURE_RETRY_DELAY_MULTLIPLIER * PgHaMigrations::LOCK_TIMEOUT_SECONDS`, and start again at step 1.
+3. If the lock is acquired, proceed to run the given block.
 
 ```ruby
 safely_acquire_lock_for_table(:table) do
@@ -395,6 +489,27 @@ safely_acquire_lock_for_table(:table) do
 end
 ```
 
+Safely acquire a lock on a table in `SHARE` mode.
+
+```ruby
+safely_acquire_lock_for_table(:table, mode: :share) do
+  ...
+end
+```
+
+Safely acquire a lock on multiple tables in `EXCLUSIVE` mode.
+
+```ruby
+safely_acquire_lock_for_table(:table_a, :table_b, mode: :exclusive) do
+  ...
+end
+```
+
+Note:
+
+We enforce that only one set of tables can be locked at a time.
+Attempting to acquire a nested lock on a different set of tables will result in an error.
+
 #### adjust\_lock\_timeout
 
 Adjust lock timeout.
@@ -423,6 +538,22 @@ Set maintenance work mem.
 safe_set_maintenance_work_mem_gb 1
 ```
 
+#### ensure\_small\_table!
+
+Ensure a table on disk is below the default threshold (10 megabytes).
+This will raise an error if the table is too large.
+
+```ruby
+ensure_small_table! :table
+```
+
+Ensure a table on disk is below a custom threshold and is empty.
+This will raise an error if the table is too large and/or contains data.
+
+```ruby
+ensure_small_table! :table, empty: true, threshold: 100.megabytes
+```
+
 ### Configuration
 
 The gem can be configured in an initializer.
@@ -436,9 +567,9 @@ end
 #### Available options
 
 - `disable_default_migration_methods`: If true, the default implementations of DDL changes in `ActiveRecord::Migration` and the PostgreSQL adapter will be overridden by implementations that raise a `PgHaMigrations::UnsafeMigrationError`. Default: `true`
-- `check_for_dependent_objects`: If true, some `unsafe_*` migration methods will raise a `PgHaMigrations::UnsafeMigrationError` if any dependent objects exist. Default: `
-- `prefer_single_step_column_addition_with_default`: If true, raise an error when adding a column and separately setting a constant default value for that column in the same migration. Default: `
-- `allow_force_create_table`: If false, the `force: true` option to ActiveRecord's `create_table` method is disallowed. Default: `
+- `check_for_dependent_objects`: If true, some `unsafe_*` migration methods will raise a `PgHaMigrations::UnsafeMigrationError` if any dependent objects exist. Default: `true`
+- `prefer_single_step_column_addition_with_default`: If true, raise an error when adding a column and separately setting a constant default value for that column in the same migration. Default: `true`
+- `allow_force_create_table`: If false, the `force: true` option to ActiveRecord's `create_table` method is disallowed. Default: `false`
 - `infer_primary_key_on_partitioned_tables`: If true, the primary key for partitioned tables will be inferred on PostgreSQL 11+ databases (identifier column + partition key columns). Default: `true`
 
 ### Rake Tasks
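For reference, a sketch of an initializer exercising the options listed in this hunk. The `PgHaMigrations.configure` block is the mechanism the Configuration section above refers to, and the values shown are the documented defaults:

```ruby
# config/initializers/pg_ha_migrations.rb -- values shown are the documented defaults.
PgHaMigrations.configure do |config|
  config.disable_default_migration_methods = true
  config.check_for_dependent_objects = true
  config.prefer_single_step_column_addition_with_default = true
  config.allow_force_create_table = false
  config.infer_primary_key_on_partitioned_tables = true
end
```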
@@ -465,6 +596,10 @@ Rake::Task["pg_ha_migrations:check_blocking_database_transactions"].enhance ["db
 
 After checking out the repo, run `bin/setup` to install dependencies and start a postgres docker container. Then, run `bundle exec rspec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment. This project uses Appraisal to test against multiple versions of ActiveRecord; you can run the tests against all supported versions with `bundle exec appraisal rspec`.
 
+> **Warning**: If you rebuild the Docker container _without_ using `docker-compose build` (or the `--build` flag), it will not respect the `PGVERSION` environment variable that you've set if image layers from a different version exist. The Dockerfile uses a build-time argument that's only evaluated during the initial build. To change the Postgres version, you should explicitly provide the build argument: `docker-compose build --build-arg PGVERSION=15`. **Using `bin/setup` handles this for you.**
+
+> **Warning**: The Postgres Dockerfile automatically creates an anonymous volume for the data directory. When changing the specified `PGVERSION` environment variable this volume must be reset using `--renew-anon-volumes` or booting Postgres will fail. **Using `bin/setup` handles this for you.**
+
 Running tests will automatically create a test database in the locally running Postgres server. You can find the connection parameters in `spec/spec_helper.rb`, but setting the environment variables `PGHOST`, `PGPORT`, `PGUSER`, and `PGPASSWORD` will override the defaults.
 
 To install this gem onto your local machine, run `bundle exec rake install`.
data/Rakefile
CHANGED
@@ -1,6 +1,8 @@
 require "bundler/gem_tasks"
 require "rspec/core/rake_task"
 require "appraisal"
+# In Rails 6 this isn't required in the right order and worked by accident; fixed in rails@0f5e7a66143
+require "logger"
 require_relative File.join("lib", "pg_ha_migrations")
 
 RSpec::Core::RakeTask.new(:spec)
data/bin/setup
CHANGED
@@ -9,4 +9,6 @@ bundle exec appraisal install
 # Do any other automated setup that you need to do here
 
 # Launch a blank postgres image with partman for testing
-
+# Because the Postgres image volumizes by default, we have to reset the volumes
+# or launching the setup with different PGVERSION env vars will fail.
+docker compose up -d --build --renew-anon-volumes
data/docker-compose.yml
CHANGED
data/lib/pg_ha_migrations/allowed_versions.rb
CHANGED
@@ -1,7 +1,7 @@
 require "active_record/migration/compatibility"
 
 module PgHaMigrations::AllowedVersions
-  ALLOWED_VERSIONS = [4.2, 5.0, 5.1, 5.2, 6.0, 6.1, 7.0].map do |v|
+  ALLOWED_VERSIONS = [4.2, 5.0, 5.1, 5.2, 6.0, 6.1, 7.0, 7.1, 7.2, 8.0].map do |v|
     begin
       ActiveRecord::Migration[v]
     rescue ArgumentError
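The expanded list above appears to control which `ActiveRecord::Migration[x.y]` compatibility classes the gem accepts, so a migration written against the newly allowed 8.0 version would look like the following sketch (class and table names invented):

```ruby
# Illustrative migration declaring the newly allowed 8.0 compatibility version.
class CreateWidgets < ActiveRecord::Migration[8.0]
  def up
    safe_create_table :widgets do |t|
      t.text :name
    end
  end
end
```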
data/lib/pg_ha_migrations/blocking_database_transactions.rb
CHANGED
@@ -1,13 +1,18 @@
 module PgHaMigrations
   class BlockingDatabaseTransactions
     LongRunningTransaction = Struct.new(:database, :current_query, :state, :transaction_age, :tables_with_locks) do
+      def initialize(*args)
+        super
+
+        self.tables_with_locks = tables_with_locks.map { |args| Table.new(*args) }.select(&:present?)
+      end
+
       def description
-        locked_tables = tables_with_locks.compact
         [
           database,
-
+          tables_with_locks.size > 0 ? "tables (#{tables_with_locks.map(&:fully_qualified_name).join(', ')})" : nil,
           "#{idle? ? "currently idle " : ""}transaction open for #{transaction_age}",
-          "#{idle? ? "last " : ""}query: #{current_query}"
+          "#{idle? ? "last " : ""}query: #{current_query}",
         ].compact.join(" | ")
       end
 
@@ -43,7 +48,7 @@ module PgHaMigrations
           psa.#{query_column} as current_query,
           psa.state,
           clock_timestamp() - psa.xact_start AS transaction_age,
-          array_agg(distinct c.relname) AS tables_with_locks
+          array_agg(distinct array[c.relname, ns.nspname, l.mode]) AS tables_with_locks
         FROM pg_stat_activity psa -- Cluster wide
         LEFT JOIN pg_locks l ON (psa.#{pid_column} = l.pid) -- Cluster wide
         LEFT JOIN pg_class c ON ( -- Database wide
@@ -56,7 +61,7 @@ module PgHaMigrations
            l.locktype != 'relation'
            OR (
              ns.nspname != 'pg_catalog'
-              AND c.relkind
+              AND c.relkind IN ('r', 'p') -- 'r' is a standard table; 'p' is a partition parent
            )
          )
          AND psa.xact_start < clock_timestamp() - ?::interval
data/lib/pg_ha_migrations/constraint.rb
CHANGED
@@ -0,0 +1 @@
+PgHaMigrations::CheckConstraint = Struct.new(:name, :definition, :validated)
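A tiny sketch of the new struct's shape; the values are made up and the `definition` format is an assumption:

```ruby
# Hypothetical instance of the struct added above.
constraint = PgHaMigrations::CheckConstraint.new(
  "users_email_not_null", # name
  "email IS NOT NULL",    # definition (assumed format)
  true                    # validated
)
constraint.validated # => true
```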
data/lib/pg_ha_migrations/hacks/add_index_on_only.rb
ADDED
@@ -0,0 +1,30 @@
+require "active_record/connection_adapters/postgresql_adapter"
+require "active_record/connection_adapters/postgresql/schema_creation"
+
+module PgHaMigrations
+  module ActiveRecordHacks
+    module IndexAlgorithms
+      def index_algorithms
+        super.merge(only: "ONLY")
+      end
+    end
+
+    module CreateIndexDefinition
+      def visit_CreateIndexDefinition(o)
+        if o.algorithm == "ONLY"
+          o.algorithm = nil
+
+          quoted_index = quote_column_name(o.index.name)
+          quoted_table = quote_table_name(o.index.table)
+
+          super.sub("#{quoted_index} ON #{quoted_table}", "#{quoted_index} ON ONLY #{quoted_table}")
+        else
+          super
+        end
+      end
+    end
+  end
+end
+
+ActiveRecord::ConnectionAdapters::PostgreSQLAdapter.prepend(PgHaMigrations::ActiveRecordHacks::IndexAlgorithms)
+ActiveRecord::ConnectionAdapters::PostgreSQL::SchemaCreation.prepend(PgHaMigrations::ActiveRecordHacks::CreateIndexDefinition)
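This new hack registers an `:only` index "algorithm" and rewrites the generated SQL so that `CREATE INDEX` targets only the partitioned parent (`ON ONLY`), which the partitioned-index helpers build on. A hedged usage sketch -- the gem invokes this internally, and the table and index names here are made up:

```ruby
# Illustrative only: with the modules above prepended, the :only "algorithm"
# causes the generated SQL to read CREATE INDEX ... ON ONLY "events" ...
ActiveRecord::Base.connection.add_index(
  :events,                            # a partitioned parent table (hypothetical)
  :created_at,
  name: "index_events_on_created_at",
  algorithm: :only
)
```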