pg_ha_migrations 1.8.0 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 6858f02b9a874bbaf79865c789a490c9aa1140240537d0eded8f48486c6348f3
- data.tar.gz: 733394b7f83821f71821816777765d982d1580761522e0751534d3e3c62f598b
+ metadata.gz: 7b2e6bb12c48cffbc58e2f182f6d147f9952fd5167b82feaebee77190d0c71f0
+ data.tar.gz: f438b6095e6493bae0545cca0f9b3b3e5aa6d2dfc14d80a8c851e80f5b90e09d
  SHA512:
- metadata.gz: 55f3f8e3730fc11183e71cbf6ba9d32dabf1c8b0ec532da6861b47d045cffb348184350831f564999ecee22b601931ab577ca20f8c4e831e0d4b776babe21a58
- data.tar.gz: e260ebe93cafba7f3119c41bd2594a797293713f95d072378dbc5733f176d86ce8a93c8a24b53b6fbb9ec77756f4db9ab5fdb92f6080de6ac0fb56e158e5b5d3
+ metadata.gz: 85b57ebf531422f9b14ff7d3e91a0759b9e4c714dd8b53dff2e5e45e0b44fe583ab726ad9a160f5d34de0bab102da0770caae5dad622e5294d4d8eba1b178a52
+ data.tar.gz: 2992367de5df4a1b57bcd024c39edebf40ecd5cc595a2b2b8034cd89af60d9bb068a4029e8771f66abf0fc7c0a73966d7c06bb0959c678df264e874276b5e73b
@@ -5,21 +5,28 @@ jobs:
  strategy:
  matrix:
  pg:
- - 11
- - 12
  - 13
  - 14
  - 15
  - 16
+ - 17
  ruby:
- - "3.0"
- - "3.1"
  - "3.2"
+ - "3.3"
+ - "3.4"
  gemfile:
- - rails_6.1
- - rails_7.0
  - rails_7.1
- name: PostgreSQL ${{ matrix.pg }} - Ruby ${{ matrix.ruby }} - ${{ matrix.gemfile }}
+ - rails_7.2
+ - rails_8.0
+ partman:
+ - 4
+ - 5
+ exclude:
+ - pg: 17
+ partman: 4 # Partman 4.x is not available in PGDG for PG 17
+ - pg: 13
+ partman: 5 # Partman 5.x is not available in PGDG for PG 13
+ name: PostgreSQL ${{ matrix.pg }} - Partman ${{ matrix.partman }} - Ruby ${{ matrix.ruby }} - ${{ matrix.gemfile }}
  runs-on: ubuntu-latest
  env: # $BUNDLE_GEMFILE must be set at the job level, so it is set for all steps
  BUNDLE_GEMFILE: gemfiles/${{ matrix.gemfile }}.gemfile
@@ -27,9 +34,10 @@ jobs:
  steps:
  - uses: actions/checkout@v3
  - name: Build postgres image and start the container
- run: docker-compose up -d --build
+ run: docker compose up -d --build
  env:
  PGVERSION: ${{ matrix.pg }}
+ PARTMAN_VERSION: ${{ matrix.partman }}
  - name: Setup Ruby using .ruby-version file
  uses: ruby/setup-ruby@v1
  with:
data/.ruby-version CHANGED
@@ -1 +1 @@
- ruby-3.0
+ ruby-3.4.2
data/Appraisals CHANGED
@@ -1,11 +1,11 @@
- appraise "rails-6.1" do
- gem "rails", "6.1.7.6"
+ appraise "rails-7.1" do
+ gem "rails", "~> 7.1.0"
  end

- appraise "rails-7.0" do
- gem "rails", "7.0.8"
+ appraise "rails-7.2" do
+ gem "rails", "~> 7.2.0"
  end

- appraise "rails-7.1" do
- gem "rails", "7.1.0"
+ appraise "rails-8.0" do
+ gem "rails", "~> 8.0.0"
  end
data/Dockerfile CHANGED
@@ -1,6 +1,7 @@
  ARG PGVERSION
+ ARG PARTMAN_VERSION

- FROM postgres:$PGVERSION-bullseye
+ FROM postgres:$PGVERSION-bookworm AS base

  RUN apt-get update && apt-get install -y curl ca-certificates gnupg lsb-release

@@ -8,4 +9,11 @@ RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor | te

  RUN echo "deb https://apt-archive.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg-archive main" > /etc/apt/sources.list.d/pgdg.list

- RUN apt update && apt-get install -y postgresql-$PG_MAJOR-partman=4.7.4-2.pgdg110+1
+ FROM base AS partman-4-branch
+ ENV PARTMAN_VERSION=4.7.4-2.pgdg120+1
+
+ FROM base AS partman-5-branch
+ ENV PARTMAN_VERSION=5.2.4-1.pgdg120+1
+
+ FROM partman-$PARTMAN_VERSION-branch AS final
+ RUN apt update && apt-get install -y postgresql-$PG_MAJOR-partman=$PARTMAN_VERSION
data/Gemfile CHANGED
@@ -4,4 +4,3 @@ git_source(:github) {|repo_name| "https://github.com/#{repo_name}" }

  # Specify your gem's dependencies in pg_ha_migrations.gemspec
  gemspec
-
data/README.md CHANGED
@@ -30,7 +30,36 @@ Or install it yourself as:

  $ gem install pg_ha_migrations

- ## Usage
+ ## Migration Safety
+
+ There are two major classes of concerns we try to handle in the API:
+
+ - Database safety (e.g., long-held locks)
+ - Application safety (e.g., dropping columns the app uses)
+
+ ### Migration Method Renaming
+
+ We rename migration methods with prefixes to explicitly denote their safety level:
+
+ - `safe_*`: These methods check for both application and database safety concerns, prefer concurrent operations where available, set low lock timeouts where appropriate, and decompose operations into multiple safe steps.
+ - `unsafe_*`: Using these methods is a signal that the DDL operation is not necessarily safe for a running application. They include basic safety features like safe lock acquisition and dependent object checking, but otherwise dispatch directly to the native ActiveRecord migration method.
+ - `raw_*`: These methods are a direct dispatch to the native ActiveRecord migration method.
+
+ Calling the original migration methods without a prefix will raise an error.
+
+ The API is designed to be explicit yet remain flexible. There may be situations where invoking the `unsafe_*` method is preferred (or the only option available for definitionally unsafe operations).
+
+ While `unsafe_*` methods were historically (before 2.0) pure wrappers for invoking the native ActiveRecord migration method, there is a class of problems that we can't handle easily without breaking that design rule a bit. For example, dropping a column is unsafe from an application perspective, so we make the application safety concerns explicit by using an `unsafe_` prefix. Using `unsafe_remove_column` calls out the need to audit the application to confirm the migration won't break the application. Because there are no safe alternatives, we don't define a `safe_remove_column` analogue. However, there are still conditions we'd like to assert before dropping a column. For example, dropping an unused column that's used in one or more indexes may be safe from an application perspective, but the cascading drop of the dependent indexes won't use a `CONCURRENT` operation and is therefore unsafe from a database perspective.
+
+ For `unsafe_*` migration methods which support checks of this type, you can bypass the checks by passing an `:allow_dependent_objects` key in the method's `options` hash containing an array of dependent object types you'd like to allow. These checks run by default, but you can opt out by setting `config.check_for_dependent_objects = false` [in your configuration initializer](#configuration).
+
+ ### Disallowed Migration Methods
+
+ We disallow the use of `unsafe_change_table`, as the equivalent operation can be composed with explicit `safe_*` / `unsafe_*` methods. If you _must_ use `change_table`, it is still available as `raw_change_table`.
+
+ ### Migration Method Arguments
+
+ We believe the `force: true` option to ActiveRecord's `create_table` method is always unsafe because it's not possible to denote exactly how the current state will change. Therefore we disallow using `force: true` even when calling `unsafe_create_table`. This check is enabled by default, but you can opt out by setting `config.allow_force_create_table = true` [in your configuration initializer](#configuration).
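For illustration only, a minimal sketch of a migration following the conventions in this section (the table and column names, and the `allow_dependent_objects` value, are hypothetical; check your gem version for the exact options accepted):

```ruby
# Hypothetical migration demonstrating the prefix convention described above.
class AdjustWidgets < ActiveRecord::Migration[7.1]
  def up
    # safe_*: checked for both application and database safety.
    safe_add_column :widgets, :price_cents, :integer

    # unsafe_*: explicit opt-in to an operation that is unsafe from an
    # application perspective; dependent-object checks still apply unless
    # bypassed (the :indexes value here is illustrative).
    unsafe_remove_column :widgets, :legacy_flag, allow_dependent_objects: [:indexes]

    # Calling the bare ActiveRecord method would raise
    # PgHaMigrations::UnsafeMigrationError:
    # add_column :widgets, :note, :text
  end
end
```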

  ### Rollback

@@ -46,40 +75,36 @@ end

  and never use `def change`. We believe that this is the only safe approach in production environments. For development environments we iterate by recreating the database from scratch every time we make a change.

- ### Migrations
+ ### Transactional DDL

- There are two major classes of concerns we try to handle in the API:
+ Individual DDL statements in PostgreSQL are transactional by default (as are all Postgres statements). Concurrent index creation and removal are two exceptions: these utility commands manage their own transaction state (and each uses multiple transactions to achieve the desired concurrency).

- - Database safety (e.g., long-held locks)
- - Application safety (e.g., dropping columns the app uses)
-
- We rename migration methods with prefixes denoting their safety level:
-
- - `safe_*`: These methods check for both application and database safety concerns, prefer concurrent operations where available, set low lock timeouts where appropriate, and decompose operations into multiple safe steps.
- - `unsafe_*`: These methods are generally a direct dispatch to the native ActiveRecord migration method.
+ We [disable ActiveRecord's DDL transactions](./lib/pg_ha_migrations/hacks/disable_ddl_transaction.rb) (which wrap the entire migration file in a transaction) by default for the following reasons:

- Calling the original migration methods without a prefix will raise an error.
-
- The API is designed to be explicit yet remain flexible. There may be situations where invoking the `unsafe_*` method is preferred (or the only option available for definitionally unsafe operations).
+ * [Running multiple DDL statements inside a transaction acquires exclusive locks on all of the modified objects](https://medium.com/paypal-tech/postgresql-at-scale-database-schema-changes-without-downtime-20d3749ed680#cc22).
+ * Acquired locks are held until the end of the transaction.
+ * Multiple locks create the possibility of deadlocks.
+ * Increased exposure to long waits:
+   * Each newly acquired lock has its own timeout applied (so total lock time is additive).
+   * [Safe lock acquisition](#safely_acquire_lock_for_table) (which is used in each migration method where locks will be acquired) can issue multiple lock attempts on lock timeouts (with sleep delays between attempts).

- While `unsafe_*` methods were historically (through 1.0) pure wrappers for invoking the native ActiveRecord migration method, there is a class of problems that we can't handle easily without breaking that design rule a bit. For example, dropping a column is unsafe from an application perspective, so we make the application safety concerns explicit by using an `unsafe_` prefix. Using `unsafe_remove_column` calls out the need to audit the application to confirm the migration won't break the application. Because there are no safe alternatives we don't define a `safe_remove_column` analogue. However there are still conditions we'd like to assert before dropping a column. For example, dropping an unused column that's used in one or more indexes may be safe from an application perspective, but the cascading drop of the index won't use a `CONCURRENT` operation to drop the dependent indexes and is therefore unsafe from a database perspective.
+ Because of the above issues, attempting to re-enable transactional migrations forfeits many of the safety guarantees this library provides and may even break certain functionality. If you'd like to experiment with it anyway, you can re-enable transactional migrations by adding `self.disable_ddl_transaction = false` to your migration class definition.
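A minimal sketch of that opt-out, for illustration (the migration name and body are hypothetical):

```ruby
# Re-enabling ActiveRecord's wrapping DDL transaction for a single migration.
# Doing this forfeits several of the gem's safety guarantees; see the notes above.
class BackfillSomething < ActiveRecord::Migration[7.1]
  self.disable_ddl_transaction = false

  def up
    # DDL here runs inside the migration-level transaction again.
  end
end
```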

- When `unsafe_*` migration methods support checks of this type you can bypass the checks by passing an `:allow_dependent_objects` key in the method's `options` hash containing an array of dependent object types you'd like to allow. Until 2.0 none of these checks will run by default, but you can opt-in by setting `config.check_for_dependent_objects = true` [in your configuration initializer](#configuration).
-
- Similarly we believe the `force: true` option to ActiveRecord's `create_table` method is always unsafe, and therefore we disallow it even when calling `unsafe_create_table`. This option won't be enabled by default until 2.0, but you can opt-in by setting `config.allow_force_create_table = false` [in your configuration initializer](#configuration).
+ ## Usage

- [Running multiple DDL statements inside a transaction acquires exclusive locks on all of the modified objects](https://medium.com/paypal-tech/postgresql-at-scale-database-schema-changes-without-downtime-20d3749ed680#cc22). For that reason, this gem [disables DDL transactions](./lib/pg_ha_migrations/hacks/disable_ddl_transaction.rb) by default. You can change this by resetting `ActiveRecord::Migration.disable_ddl_transaction` in your application.
+ ### Unsupported ActiveRecord Features

  The following functionality is currently unsupported:

- - Rollbacks
+ - [Rollback methods in migrations](#rollback)
  - Generators
  - schema.rb

- Compatibility notes:
+ ### Compatibility Notes

- - While some features may work with other versions, this gem is currently tested against PostgreSQL 11+ and Partman 4.x
- - There is a [bug](https://github.com/rails/rails/pull/41490) in early versions of Rails 6.1 when using `algorithm: :concurrently`. To add / remove indexes concurrently, please upgrade to at least Rails 6.1.4.
+ - While some features may work with other versions, this gem is currently tested against PostgreSQL 13+ and Partman 4.x
+
+ ### Migration Methods

  #### safe\_create\_table

@@ -119,9 +144,7 @@ Unsafely change the value of an enum type entry.
  unsafe_rename_enum_value(:enum, "old_value", "new_value")
  ```

- Note:
-
- Changing an enum value does not issue any long-running scans or acquire locks on usages of the enum type. Therefore multiple queries within a transaction concurrent with the change may see both the old and new values. To highlight these potential pitfalls no `safe_rename_enum_value` equivalent exists. Before modifying an enum type entry you should verify that no concurrently executing queries will attempt to write the old value and that read queries understand the new value.
+ > **Note:** Changing an enum value does not issue any long-running scans or acquire locks on usages of the enum type. Therefore multiple queries within a transaction concurrent with the change may see both the old and new values. To highlight these potential pitfalls no `safe_rename_enum_value` equivalent exists. Before modifying an enum type entry you should verify that no concurrently executing queries will attempt to write the old value and that read queries understand the new value.

  #### safe\_add\_column

@@ -153,7 +176,7 @@ safe_change_column_default :table, :column, -> { "NOW()" }
  safe_change_column_default :table, :column, -> { "'NOW()'" }
  ```

- Note: On Postgres 11+ adding a column with a constant default value does not rewrite or scan the table (under a lock or otherwise). In that case a migration adding a column with a default should do so in a single operation rather than the two-step `safe_add_column` followed by `safe_change_column_default`. We enforce this best practice with the error `PgHaMigrations::BestPracticeError`, but if your prefer otherwise (or are running in a mixed Postgres version environment), you may opt out by setting `config.prefer_single_step_column_addition_with_default = true` [in your configuration initializer](#configuration).
+ > **Note:** On Postgres 11+ adding a column with a constant default value does not rewrite or scan the table (under a lock or otherwise). In that case a migration adding a column with a default should do so in a single operation rather than the two-step `safe_add_column` followed by `safe_change_column_default`. We enforce this best practice with the error `PgHaMigrations::BestPracticeError`, but if you prefer otherwise (or are running in a mixed Postgres version environment), you may opt out by setting `config.prefer_single_step_column_addition_with_default = false` [in your configuration initializer](#configuration).
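A sketch of the single-step form the note recommends, assuming your gem version accepts a constant `default:` for `safe_add_column` (table and column names are hypothetical):

```ruby
# On Postgres 11+ a constant default neither rewrites nor scans the table,
# so the column and its default can be added in one step.
safe_add_column :users, :status, :text, default: "active"
```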

  #### safe\_make\_column\_nullable

@@ -162,6 +185,19 @@ Safely make the column nullable.
  ```ruby
  safe_make_column_nullable :table, :column
  ```
+ #### safe\_make\_column\_not\_nullable
+
+ Safely make the column not nullable. This method uses a `CHECK column IS NOT NULL` constraint to validate that no values are null before altering the column. If such a constraint already exists it is re-used; if it does not, a temporary constraint is added. In either case the constraint is validated (if necessary) and then removed after the column is marked `NOT NULL`.
+
+ ```ruby
+ safe_make_column_not_nullable :table, :column
+ ```
+
+ > **Note:**
+ > - This method may perform a full table scan to validate that no NULL values exist in the column. While no exclusive lock is held for this scan, on large tables the scan may take a long time.
+ > - The method runs multiple DDL statements non-transactionally. Validating the constraint can fail. In such cases an INVALID constraint will be left on the table. Calling `safe_make_column_not_nullable` again is safe.
+
+ If you want to avoid a full table scan and have already added and validated a suitable CHECK constraint, consider using [`safe_make_column_not_nullable_from_check_constraint`](#safe_make_column_not_nullable_from_check_constraint) instead.

  #### unsafe\_make\_column\_not\_nullable

@@ -171,6 +207,23 @@ Unsafely make a column not nullable.
  unsafe_make_column_not_nullable :table, :column
  ```

+ #### safe\_make\_column\_not\_nullable\_from\_check\_constraint
+
+ Variant of `safe_make_column_not_nullable` that safely makes a column NOT NULL using an existing validated CHECK constraint that enforces non-null values for the column. This method is expected to always be fast because it avoids a full table scan.
+
+ ```ruby
+ safe_make_column_not_nullable_from_check_constraint :table, :column, constraint_name: :constraint_name
+ ```
+
+ - `constraint_name` (required): The name of a validated CHECK constraint that enforces `column IS NOT NULL`.
+ - `drop_constraint:` (optional, default: true): Whether to drop the constraint after making the column NOT NULL.
+
+ You should use [`safe_make_column_not_nullable`](#safe_make_column_not_nullable) when neither a CHECK constraint nor a NOT NULL constraint already exists. You should use this method when you already have an equivalent CHECK constraint on the table.
+
+ This method will raise an error if the constraint does not exist, is not validated, or does not strictly enforce non-null values for the column.
+
+ > **Note:** We do not attempt to catch all possible proofs of `column IS NOT NULL` by means of an existing constraint; only a constraint with the exact definition `column IS NOT NULL` will be recognized.
+
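For illustration, a hedged sketch of the two-step workflow this method is designed for, using the constraint helpers documented elsewhere in this README (the table, column, and constraint names are hypothetical):

```ruby
# Earlier migration: add the constraint without validation (no long-held lock),
# then validate it (a scan, but without blocking writes).
safe_add_unvalidated_check_constraint :users, "email IS NOT NULL", name: :users_email_not_null
safe_validate_check_constraint :users, name: :users_email_not_null

# Later migration: promote the validated constraint to NOT NULL without
# another scan; the constraint is dropped afterwards by default.
safe_make_column_not_nullable_from_check_constraint :users, :email, constraint_name: :users_email_not_null
```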

  #### safe\_add\_index\_on\_empty\_table
  Safely add an index on a table with zero rows. This will raise an error if the table contains data.
@@ -230,11 +283,10 @@ Add a composite index using the `hash` index type with custom name for the paren
  safe_add_concurrent_partitioned_index :partitioned_table, [:column1, :column2], name: "custom_name_idx", using: :hash
  ```

- Note:
-
- This method runs multiple DDL statements non-transactionally.
- Creating or attaching an index on a child table could fail.
- In such cases an exception will be raised, and an `INVALID` index will be left on the parent table.
+ > **Note:**
+ > This method runs multiple DDL statements non-transactionally.
+ > Creating or attaching an index on a child table could fail.
+ > In such cases an exception will be raised, and an `INVALID` index will be left on the parent table.

  #### safe\_add\_unvalidated\_check\_constraint

@@ -328,7 +380,7 @@ The rest are keyword args with the following mappings:
  - `premake` -> `p_premake`. Required: `false`. Partman defaults to `4`.
  - `start_partition` -> `p_start_partition`. Required: `false`. Partman defaults to the current timestamp.

- Note that we have chosen to require PostgreSQL 11+ and hardcode `p_type` to `native` for simplicity, as previous PostgreSQL versions are end-of-life.
+ > **Note:** We have chosen to require PostgreSQL 11+ and hardcode `p_type` to `native` (`range` in the case of Partman 5) for simplicity, as previous PostgreSQL versions are end-of-life.

  Additionally, this method allows you to configure a subset of attributes on the record stored in the [part\_config](https://github.com/pgpartman/pg_partman/blob/master/doc/pg_partman.md#tables) table.
  These options are delegated to the `unsafe_partman_update_config` method to update the record:
@@ -345,7 +397,7 @@ safe_create_partitioned_table :table, type: :range, partition_key: :created_at d
  t.timestamps null: false
  end

- safe_partman_create_parent :table, partition_key: :created_at, interval: "weekly"
+ safe_partman_create_parent :table, partition_key: :created_at, interval: "1 week"
  ```

  With custom overrides:
@@ -363,28 +415,12 @@ end

  safe_partman_create_parent :table,
  partition_key: :created_at,
- interval: "weekly",
+ interval: "1 week",
  template_table: :table_template,
  premake: 10,
  start_partition: Time.current + 1.month,
  infinite_time_partitions: false,
- inherit_privileges: false
- ```
-
- #### unsafe\_partman\_create\_parent
-
- We have chosen to flag the use of `retention` and `retention_keep_table` as an unsafe operation.
- While we recognize that these options are useful, we think they fit in the same category as `drop_table` and `rename_table`, and are therefore unsafe from an application perspective.
- If you wish to define these options, you must use this method.
-
- ```ruby
- safe_create_partitioned_table :table, type: :range, partition_key: :created_at do |t|
- t.timestamps null: false
- end
-
- unsafe_partman_create_parent :table,
- partition_key: :created_at,
- interval: "weekly",
+ inherit_privileges: false,
  retention: "60 days",
  retention_keep_table: false
  ```
@@ -392,7 +428,7 @@ unsafe_partman_create_parent :table,
  #### safe\_partman\_update\_config

  There are some partitioning options that cannot be set in the call to `create_parent` and are only available in the `part_config` table.
- As mentioned previously, you can specify these args in the call to `safe_partman_create_parent` or `unsafe_partman_create_parent` which will be delegated to this method.
+ As mentioned previously, you can specify these args in the call to `safe_partman_create_parent` which will be delegated to this method.
  Calling this method directly will be useful if you need to modify your partitioned table after the fact.

  Allowed keyword args:
@@ -403,7 +439,7 @@ Allowed keyword args:
  - `retention`
  - `retention_keep_table`

- Note that we detect if the value of `inherit_privileges` is changing and will automatically call `safe_partman_reapply_privileges` to ensure permissions are propagated to existing child partitions.
+ > **Note:** If `inherit_privileges` will change then `safe_partman_reapply_privileges` will be automatically called to ensure permissions are propagated to existing child partitions.

  ```ruby
  safe_partman_update_config :table,
@@ -414,8 +450,9 @@ safe_partman_update_config :table,

  #### unsafe\_partman\_update\_config

- As with creating a partman parent table, we have chosen to flag the use of `retention` and `retention_keep_table` as an unsafe operation.
- If you wish to define these options, you must use this method.
+ We have chosen to flag the use of `retention` and `retention_keep_table` as an unsafe operation.
+ While we recognize that these options are useful, changing these values fits in the same category as `drop_table` and `rename_table`, and is therefore unsafe from an application perspective.
+ If you wish to change these options, you must use this method.

  ```ruby
  unsafe_partman_update_config :table,
@@ -431,11 +468,40 @@ If your partitioned table is configured with `inherit_privileges` set to `true`,
  safe_partman_reapply_privileges :table
  ```

+ #### unsafe\_partman\_standardize\_partition\_naming
+
+ This method provides functionality to standardize existing Partman 4 tables such that naming is consistent with Partman 5.
+ The logic follows the guidelines in the [Partman upgrade docs](https://github.com/pgpartman/pg_partman/blob/v5.2.4/doc/pg_partman_5.0.1_upgrade.md).
+
+ Technically, only `weekly` and `quarterly` partitioned tables _need_ to be standardized prior to the upgrade.
+ _However_, Partman 5 changes the default [datetime_string](https://github.com/pgpartman/pg_partman/blob/v5.2.4/sql/functions/calculate_time_partition_info.sql#L13-L17) that is used for _all_ intervals (`YYYYMMDD` and `YYYYMMDD_HH24MISS`).
+ Compare that to the Partman 4 logic for [datetime_string](https://github.com/pgpartman/pg_partman/blob/v4.7.4/sql/functions/create_parent.sql#L434-L459).
+ So, this method supports standardization for _all_ Partman 4 intervals.
+
+ > **Note:** This method is safe from a database perspective, but is only safe from an application perspective if child tables are not directly referenced (child tables are renamed during this operation).
+
+ ```ruby
+ unsafe_partman_standardize_partition_naming :table
+ ```
+
+ This method uses a default statement timeout of 1 second.
+ If the target table has many partitions (hundreds of thousands), you may need to increase the statement timeout for the operation to succeed.
+
+ ```ruby
+ unsafe_partman_standardize_partition_naming :table, statement_timeout: 2
+ ```
+
  ### Utilities

  #### safely\_acquire\_lock\_for\_table

- Safely acquire an access exclusive lock for a table.
+ Acquires a lock (in `ACCESS EXCLUSIVE` mode by default) on a table using the following algorithm:
+
+ 1. Verify that no long-running queries are using the table.
+    - If long-running queries are currently using the table, sleep `PgHaMigrations::LOCK_TIMEOUT_SECONDS` and check again.
+ 2. If no long-running queries are currently using the table, optimistically attempt to lock the table (with a timeout of `PgHaMigrations::LOCK_TIMEOUT_SECONDS`).
+    - If the lock is not acquired, sleep `PgHaMigrations::LOCK_FAILURE_RETRY_DELAY_MULTLIPLIER * PgHaMigrations::LOCK_TIMEOUT_SECONDS`, and start again at step 1.
+ 3. If the lock is acquired, proceed to run the given block.

  ```ruby
  safely_acquire_lock_for_table(:table) do
@@ -443,7 +509,7 @@ safely_acquire_lock_for_table(:table) do
  end
  ```

- Safely acquire a lock for a table in a different mode.
+ Safely acquire a lock on a table in `SHARE` mode.

  ```ruby
  safely_acquire_lock_for_table(:table, mode: :share) do
@@ -451,10 +517,16 @@ safely_acquire_lock_for_table(:table, mode: :share) do
  end
  ```

- Note:
+ Safely acquire a lock on multiple tables in `EXCLUSIVE` mode.
+
+ ```ruby
+ safely_acquire_lock_for_table(:table_a, :table_b, mode: :exclusive) do
+ ...
+ end
+ ```

- We enforce that only one table (or a table and its partitions) can be locked at a time.
- Attempting to acquire a nested lock on a different table will result in an error.
+ > **Note:** We enforce that only one set of tables can be locked at a time.
+ > Attempting to acquire a nested lock on a different set of tables will result in an error.

  #### adjust\_lock\_timeout

@@ -513,10 +585,11 @@ end
  #### Available options

  - `disable_default_migration_methods`: If true, the default implementations of DDL changes in `ActiveRecord::Migration` and the PostgreSQL adapter will be overridden by implementations that raise a `PgHaMigrations::UnsafeMigrationError`. Default: `true`
- - `check_for_dependent_objects`: If true, some `unsafe_*` migration methods will raise a `PgHaMigrations::UnsafeMigrationError` if any dependent objects exist. Default: `false`
- - `prefer_single_step_column_addition_with_default`: If true, raise an error when adding a column and separately setting a constant default value for that column in the same migration. Default: `false`
- - `allow_force_create_table`: If false, the `force: true` option to ActiveRecord's `create_table` method is disallowed. Default: `true`
+ - `check_for_dependent_objects`: If true, some `unsafe_*` migration methods will raise a `PgHaMigrations::UnsafeMigrationError` if any dependent objects exist. Default: `true`
+ - `prefer_single_step_column_addition_with_default`: If true, raise an error when adding a column and separately setting a constant default value for that column in the same migration. Default: `true`
+ - `allow_force_create_table`: If false, the `force: true` option to ActiveRecord's `create_table` method is disallowed. Default: `false`
  - `infer_primary_key_on_partitioned_tables`: If true, the primary key for partitioned tables will be inferred on PostgreSQL 11+ databases (identifier column + partition key columns). Default: `true`
+ - `partman_5_compatibility_mode`: If true, `safe_partman_create_parent` will raise an error if the user provides an interval that is [not supported by Partman 5](https://github.com/pgpartman/pg_partman/blob/v5.2.4/sql/functions/create_parent.sql#L86-L96). If the interval is supported, the method will ensure table name suffixes match the Partman 5 format (`YYYYMMDD`, `YYYYMMDD_HH24MISS`). Default: `false`
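For reference, a sketch of a configuration initializer setting some of the options above (an assumed `config/initializers/pg_ha_migrations.rb`; the values shown are illustrative, not recommendations):

```ruby
# config/initializers/pg_ha_migrations.rb
PgHaMigrations.configure do |config|
  # Opt out of dependent-object checks in unsafe_* methods.
  config.check_for_dependent_objects = false

  # Allow the two-step column addition + constant default pattern
  # (e.g. in a mixed Postgres version environment).
  config.prefer_single_step_column_addition_with_default = false

  # Opt in to Partman 5 interval validation and suffix naming.
  config.partman_5_compatibility_mode = true
end
```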

  ### Rake Tasks

@@ -542,13 +615,17 @@ Rake::Task["pg_ha_migrations:check_blocking_database_transactions"].enhance ["db

  After checking out the repo, run `bin/setup` to install dependencies and start a postgres docker container. Then, run `bundle exec rspec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment. This project uses Appraisal to test against multiple versions of ActiveRecord; you can run the tests against all supported versions with `bundle exec appraisal rspec`.

+ > **Warning**: If you rebuild the Docker container _without_ using `docker-compose build` (or the `--build` flag), it will not respect the `PGVERSION` environment variable that you've set if image layers from a different version exist. The Dockerfile uses a build-time argument that's only evaluated during the initial build. To change the Postgres version, you should explicitly provide the build argument: `docker-compose build --build-arg PGVERSION=15`. **Using `bin/setup` handles this for you.**
+
+ > **Warning**: The Postgres Dockerfile automatically creates an anonymous volume for the data directory. When changing the specified `PGVERSION` environment variable this volume must be reset using `--renew-anon-volumes` or booting Postgres will fail. **Using `bin/setup` handles this for you.**
+
  Running tests will automatically create a test database in the locally running Postgres server. You can find the connection parameters in `spec/spec_helper.rb`, but setting the environment variables `PGHOST`, `PGPORT`, `PGUSER`, and `PGPASSWORD` will override the defaults.

  To install this gem onto your local machine, run `bundle exec rake install`.

  To release a new version, update the version number in `version.rb`, commit the change, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org).

- Note: if while releasing the gem you get the error ``Your rubygems.org credentials aren't set. Run `gem push` to set them.`` you can more simply run `gem signin`.
+ > **Note:** If while releasing the gem you get the error ``Your rubygems.org credentials aren't set. Run `gem push` to set them.`` you can more simply run `gem signin`.

  ## Contributing

data/Rakefile CHANGED
@@ -1,6 +1,8 @@
  require "bundler/gem_tasks"
  require "rspec/core/rake_task"
  require "appraisal"
+ # In Rails 6 this isn't required in the right order and worked by accident; fixed in rails@0f5e7a66143
+ require "logger"
  require_relative File.join("lib", "pg_ha_migrations")

  RSpec::Core::RakeTask.new(:spec)
data/bin/setup CHANGED
@@ -9,4 +9,6 @@ bundle exec appraisal install
  # Do any other automated setup that you need to do here

  # Launch a blank postgres image with partman for testing
- docker-compose up -d --build
+ # Because the Postgres image volumizes by default, we have to reset the volumes
+ # or launching the setup with different PGVERSION env vars will fail.
+ docker compose up -d --build --renew-anon-volumes
data/docker-compose.yml CHANGED
@@ -4,7 +4,8 @@ services:
  build:
  context: .
  args:
- - PGVERSION=${PGVERSION:-16}
+ - PGVERSION=${PGVERSION:-17}
+ - PARTMAN_VERSION=${PARTMAN_VERSION:-5}
  ports:
  - "5432:5432"
  environment:
@@ -2,6 +2,6 @@

  source "https://rubygems.org"

- gem "rails", "7.1.0"
+ gem "rails", "~> 7.1.0"

  gemspec path: "../"
@@ -2,6 +2,6 @@

  source "https://rubygems.org"

- gem "rails", "6.1.7.6"
+ gem "rails", "~> 7.2.0"

  gemspec path: "../"
@@ -2,6 +2,6 @@

  source "https://rubygems.org"

- gem "rails", "7.0.8"
+ gem "rails", "~> 8.0.0"

  gemspec path: "../"
@@ -1,7 +1,7 @@
  require "active_record/migration/compatibility"

  module PgHaMigrations::AllowedVersions
- ALLOWED_VERSIONS = [4.2, 5.0, 5.1, 5.2, 6.0, 6.1, 7.0, 7.1].map do |v|
+ ALLOWED_VERSIONS = [4.2, 5.0, 5.1, 5.2, 6.0, 6.1, 7.0, 7.1, 7.2, 8.0].map do |v|
  begin
  ActiveRecord::Migration[v]
  rescue ArgumentError
@@ -0,0 +1,8 @@
+ PgHaMigrations::CheckConstraint = Struct.new(:name, :definition, :validated) do
+   def initialize(name, definition, validated)
+     # pg_get_constraintdef includes NOT VALID in the definition,
+     # but we return that as a separate attribute.
+     definition = definition&.gsub(/ NOT VALID\Z/, "")
+     super(name, definition, validated)
+   end
+ end
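A brief illustration of the struct added above (values are hypothetical; note how the `NOT VALID` suffix is stripped from the definition while `validated` is reported separately):

```ruby
constraint = PgHaMigrations::CheckConstraint.new(
  "users_email_not_null",
  "CHECK ((email IS NOT NULL)) NOT VALID",
  false
)

constraint.definition # => "CHECK ((email IS NOT NULL))"
constraint.validated  # => false
```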
@@ -0,0 +1,35 @@
+ module PgHaMigrations
+   class Extension
+     attr_reader :name, :schema, :version
+
+     def initialize(name)
+       @name = name
+
+       @schema, @version = ActiveRecord::Base.connection.select_rows(<<~SQL).first
+         SELECT nspname, extversion
+         FROM pg_namespace JOIN pg_extension
+           ON pg_namespace.oid = pg_extension.extnamespace
+         WHERE pg_extension.extname = #{ActiveRecord::Base.connection.quote(name)}
+         LIMIT 1
+       SQL
+     end
+
+     def quoted_schema
+       return unless schema
+
+       PG::Connection.quote_ident(schema)
+     end
+
+     def major_version
+       return unless version
+
+       Gem::Version.new(version)
+         .segments
+         .first
+     end
+
+     def installed?
+       !!schema && !!version
+     end
+   end
+ end
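A hypothetical usage sketch of the `Extension` helper added above (assumes an established ActiveRecord connection and that `pg_partman` is installed in that database):

```ruby
extension = PgHaMigrations::Extension.new("pg_partman")

extension.installed?    # => true when the extension exists in the connected database
extension.major_version # => e.g. 5, derived from extversion
extension.quoted_schema # => the schema the extension was installed into, identifier-quoted
```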
@@ -93,6 +93,18 @@ module PgHaMigrations
    MODE_CONFLICTS.keys.index(mode) <=> MODE_CONFLICTS.keys.index(other.mode)
  end

+   def eql?(other)
+     other.is_a?(LockMode) && mode == other.mode
+   end
+
+   def ==(other)
+     eql?(other)
+   end
+
+   def hash
+     mode.hash
+   end
+
  def conflicts_with?(other)
    MODE_CONFLICTS[mode].include?(other.mode)
  end