@supabase/pg-delta 1.0.0-alpha.20 → 1.0.0-alpha.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. package/dist/core/catalog.diff.js +4 -4
  2. package/dist/core/catalog.model.d.ts +8 -1
  3. package/dist/core/catalog.model.js +9 -8
  4. package/dist/core/expand-replace-dependencies.js +23 -0
  5. package/dist/core/objects/extract-with-retry.d.ts +36 -0
  6. package/dist/core/objects/extract-with-retry.js +51 -0
  7. package/dist/core/objects/index/index.diff.js +0 -1
  8. package/dist/core/objects/index/index.model.d.ts +2 -3
  9. package/dist/core/objects/index/index.model.js +17 -6
  10. package/dist/core/objects/materialized-view/materialized-view.model.d.ts +2 -1
  11. package/dist/core/objects/materialized-view/materialized-view.model.js +20 -4
  12. package/dist/core/objects/procedure/procedure.model.d.ts +2 -1
  13. package/dist/core/objects/procedure/procedure.model.js +20 -4
  14. package/dist/core/objects/publication/changes/publication.alter.d.ts +1 -1
  15. package/dist/core/objects/rls-policy/rls-policy.diff.js +13 -1
  16. package/dist/core/objects/rule/rule.model.d.ts +2 -1
  17. package/dist/core/objects/rule/rule.model.js +20 -3
  18. package/dist/core/objects/sequence/sequence.diff.d.ts +2 -1
  19. package/dist/core/objects/sequence/sequence.diff.js +41 -9
  20. package/dist/core/objects/table/changes/table.alter.d.ts +16 -1
  21. package/dist/core/objects/table/changes/table.alter.js +39 -6
  22. package/dist/core/objects/table/table.diff.js +40 -17
  23. package/dist/core/objects/table/table.model.d.ts +6 -1
  24. package/dist/core/objects/table/table.model.js +50 -12
  25. package/dist/core/objects/trigger/trigger.model.d.ts +2 -1
  26. package/dist/core/objects/trigger/trigger.model.js +20 -4
  27. package/dist/core/objects/utils.d.ts +1 -0
  28. package/dist/core/objects/utils.js +3 -0
  29. package/dist/core/objects/view/view.model.d.ts +2 -1
  30. package/dist/core/objects/view/view.model.js +20 -4
  31. package/dist/core/plan/create.js +3 -1
  32. package/dist/core/plan/types.d.ts +8 -0
  33. package/dist/core/post-diff-normalization.d.ts +36 -0
  34. package/dist/core/post-diff-normalization.js +202 -0
  35. package/dist/core/sort/cycle-breakers.d.ts +15 -0
  36. package/dist/core/sort/cycle-breakers.js +269 -0
  37. package/dist/core/sort/sort-changes.js +97 -43
  38. package/dist/core/sort/utils.d.ts +10 -0
  39. package/dist/core/sort/utils.js +28 -0
  40. package/package.json +1 -1
  41. package/src/core/catalog.diff.ts +4 -3
  42. package/src/core/catalog.model.ts +20 -8
  43. package/src/core/expand-replace-dependencies.test.ts +139 -5
  44. package/src/core/expand-replace-dependencies.ts +24 -0
  45. package/src/core/objects/extract-with-retry.test.ts +143 -0
  46. package/src/core/objects/extract-with-retry.ts +87 -0
  47. package/src/core/objects/index/index.diff.ts +0 -1
  48. package/src/core/objects/index/index.model.test.ts +37 -1
  49. package/src/core/objects/index/index.model.ts +25 -6
  50. package/src/core/objects/materialized-view/materialized-view.model.test.ts +93 -0
  51. package/src/core/objects/materialized-view/materialized-view.model.ts +27 -4
  52. package/src/core/objects/procedure/procedure.model.test.ts +117 -0
  53. package/src/core/objects/procedure/procedure.model.ts +28 -5
  54. package/src/core/objects/publication/changes/publication.alter.ts +1 -1
  55. package/src/core/objects/rls-policy/rls-policy.diff.ts +19 -1
  56. package/src/core/objects/rule/rule.model.test.ts +99 -0
  57. package/src/core/objects/rule/rule.model.ts +28 -4
  58. package/src/core/objects/sequence/sequence.diff.test.ts +93 -1
  59. package/src/core/objects/sequence/sequence.diff.ts +43 -10
  60. package/src/core/objects/table/changes/table.alter.test.ts +26 -23
  61. package/src/core/objects/table/changes/table.alter.ts +66 -10
  62. package/src/core/objects/table/table.diff.test.ts +43 -0
  63. package/src/core/objects/table/table.diff.ts +52 -23
  64. package/src/core/objects/table/table.model.test.ts +209 -0
  65. package/src/core/objects/table/table.model.ts +62 -14
  66. package/src/core/objects/trigger/trigger.model.test.ts +113 -0
  67. package/src/core/objects/trigger/trigger.model.ts +28 -5
  68. package/src/core/objects/utils.ts +3 -0
  69. package/src/core/objects/view/view.model.test.ts +90 -0
  70. package/src/core/objects/view/view.model.ts +28 -5
  71. package/src/core/plan/create.ts +3 -1
  72. package/src/core/plan/types.ts +8 -0
  73. package/src/core/{post-diff-cycle-breaking.test.ts → post-diff-normalization.test.ts} +168 -160
  74. package/src/core/post-diff-normalization.ts +260 -0
  75. package/src/core/sort/cycle-breakers.test.ts +476 -0
  76. package/src/core/sort/cycle-breakers.ts +311 -0
  77. package/src/core/sort/sort-changes.ts +135 -50
  78. package/src/core/sort/utils.ts +38 -0
  79. package/dist/core/post-diff-cycle-breaking.d.ts +0 -29
  80. package/dist/core/post-diff-cycle-breaking.js +0 -209
  81. package/src/core/post-diff-cycle-breaking.ts +0 -317
@@ -19,5 +19,15 @@ export declare function isMetadataStableId(stableId: string): boolean;
19
19
  * - ALTER operations with scope="privilege" → create_alter_object phase (metadata changes)
20
20
  * - ALTER operations that drop actual objects → drop phase (destructive ALTER)
21
21
  * - ALTER operations that don't drop objects → create_alter_object phase (non-destructive ALTER)
22
+ *
23
+ * Dependency-breaking ALTERs that remove a `pg_depend` edge to another
24
+ * object that may be dropped in the same plan (for example
25
+ * `ALTER COLUMN ... DROP DEFAULT` releasing a sequence reference, or
26
+ * `ALTER COLUMN ... TYPE <built-in>` releasing a user-defined type
27
+ * reference) are routed to the drop phase. The drop phase sorts in reverse
28
+ * dependency order using the main catalog, so the catalog edges already
29
+ * in `pg_depend` order the ALTER before any dependent `DROP TYPE` /
30
+ * `DROP SEQUENCE` / `DROP FUNCTION` and PostgreSQL no longer rejects the
31
+ * drop with error 2BP01.
22
32
  */
23
33
  export declare function getExecutionPhase(change: Change): Phase;
@@ -1,3 +1,4 @@
1
+ import { AlterTableAlterColumnDropDefault, AlterTableAlterColumnDropIdentity, AlterTableAlterColumnType, } from "../objects/table/changes/table.alter.js";
1
2
  /**
2
3
  * Check if a stable ID represents metadata (ACL, default privileges, comments, etc.)
3
4
  * rather than an actual database object.
@@ -20,6 +21,16 @@ export function isMetadataStableId(stableId) {
20
21
  * - ALTER operations with scope="privilege" → create_alter_object phase (metadata changes)
21
22
  * - ALTER operations that drop actual objects → drop phase (destructive ALTER)
22
23
  * - ALTER operations that don't drop objects → create_alter_object phase (non-destructive ALTER)
24
+ *
25
+ * Dependency-breaking ALTERs that remove a `pg_depend` edge to another
26
+ * object that may be dropped in the same plan (for example
27
+ * `ALTER COLUMN ... DROP DEFAULT` releasing a sequence reference, or
28
+ * `ALTER COLUMN ... TYPE <built-in>` releasing a user-defined type
29
+ * reference) are routed to the drop phase. The drop phase sorts in reverse
30
+ * dependency order using the main catalog, so the catalog edges already
31
+ * in `pg_depend` order the ALTER before any dependent `DROP TYPE` /
32
+ * `DROP SEQUENCE` / `DROP FUNCTION` and PostgreSQL no longer rejects the
33
+ * drop with error 2BP01.
23
34
  */
24
35
  export function getExecutionPhase(change) {
25
36
  // DROP operations always go to drop phase
@@ -43,6 +54,23 @@ export function getExecutionPhase(change) {
43
54
  // Destructive ALTER (DROP COLUMN, DROP CONSTRAINT, etc.) → drop phase
44
55
  return "drop";
45
56
  }
57
+ // Dependency-breaking column ALTERs that release a pg_depend edge.
58
+ // Routing these to the drop phase lets the existing catalog dependency
59
+ // edges (column → sequence, column → identity sequence) order them
60
+ // before the matching DROP statement.
61
+ if (change instanceof AlterTableAlterColumnDropDefault ||
62
+ change instanceof AlterTableAlterColumnDropIdentity) {
63
+ return "drop";
64
+ }
65
+ // ALTER COLUMN ... TYPE only safely runs in the drop phase when the
66
+ // target type is built-in. For user-defined target types we cannot tell
67
+ // here whether the type is created in the same plan, and the create
68
+ // happens in create_alter phase, so we keep the alter in that phase to
69
+ // preserve the create-then-alter ordering.
70
+ if (change instanceof AlterTableAlterColumnType &&
71
+ !change.column.is_custom_type) {
72
+ return "drop";
73
+ }
46
74
  // Non-destructive ALTER (ADD COLUMN, GRANT, etc.) → create_alter phase
47
75
  return "create_alter_object";
48
76
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@supabase/pg-delta",
3
- "version": "1.0.0-alpha.20",
3
+ "version": "1.0.0-alpha.22",
4
4
  "description": "PostgreSQL migrations made easy",
5
5
  "type": "module",
6
6
  "sideEffects": false,
@@ -1,7 +1,7 @@
1
1
  import debug from "debug";
2
2
  import type { Catalog } from "./catalog.model.ts";
3
3
  import { expandReplaceDependencies } from "./expand-replace-dependencies.ts";
4
- import { normalizePostDiffCycles } from "./post-diff-cycle-breaking.ts";
4
+ import { normalizePostDiffChanges } from "./post-diff-normalization.ts";
5
5
 
6
6
  const debugCatalog = debug("pg-delta:catalog");
7
7
 
@@ -190,6 +190,7 @@ export function diffCatalogs(
190
190
  main.sequences,
191
191
  branch.sequences,
192
192
  branch.tables,
193
+ main.tables,
193
194
  ),
194
195
  );
195
196
  changes.push(...diffTables(diffContext, main.tables, branch.tables));
@@ -238,10 +239,10 @@ export function diffCatalogs(
238
239
  mainCatalog: main,
239
240
  branchCatalog: branch,
240
241
  });
241
- filteredChanges = normalizePostDiffCycles({
242
+ filteredChanges = normalizePostDiffChanges({
242
243
  changes: expandedDependencies.changes,
243
- mainCatalog: main,
244
244
  replacedTableIds: expandedDependencies.replacedTableIds,
245
+ branchTables: branch.tables,
245
246
  });
246
247
 
247
248
  debugCatalog(
@@ -302,7 +302,19 @@ export async function createEmptyCatalog(
302
302
  });
303
303
  }
304
304
 
305
- export async function extractCatalog(pool: Pool) {
305
+ interface ExtractCatalogOptions {
306
+ /**
307
+ * Number of retry attempts for catalog extractors when `pg_get_*def()`
308
+ * returns NULL for at least one row. See `ExtractRetryOptions.retries`.
309
+ */
310
+ extractRetries?: number;
311
+ }
312
+
313
+ export async function extractCatalog(
314
+ pool: Pool,
315
+ options: ExtractCatalogOptions = {},
316
+ ) {
317
+ const retryOptions = { retries: options.extractRetries };
306
318
  const [
307
319
  aggregates,
308
320
  collations,
@@ -339,21 +351,21 @@ export async function extractCatalog(pool: Pool) {
339
351
  extractDomains(pool).then(listToRecord),
340
352
  extractEnums(pool).then(listToRecord),
341
353
  extractExtensions(pool).then(listToRecord),
342
- extractIndexes(pool).then(listToRecord),
343
- extractMaterializedViews(pool).then(listToRecord),
354
+ extractIndexes(pool, retryOptions).then(listToRecord),
355
+ extractMaterializedViews(pool, retryOptions).then(listToRecord),
344
356
  extractSubscriptions(pool).then(listToRecord),
345
357
  extractPublications(pool).then(listToRecord),
346
- extractProcedures(pool).then(listToRecord),
358
+ extractProcedures(pool, retryOptions).then(listToRecord),
347
359
  extractRlsPolicies(pool).then(listToRecord),
348
360
  extractRoles(pool).then(listToRecord),
349
361
  extractSchemas(pool).then(listToRecord),
350
362
  extractSequences(pool).then(listToRecord),
351
- extractTables(pool).then(listToRecord),
352
- extractTriggers(pool).then(listToRecord),
363
+ extractTables(pool, retryOptions).then(listToRecord),
364
+ extractTriggers(pool, retryOptions).then(listToRecord),
353
365
  extractEventTriggers(pool).then(listToRecord),
354
- extractRules(pool).then(listToRecord),
366
+ extractRules(pool, retryOptions).then(listToRecord),
355
367
  extractRanges(pool).then(listToRecord),
356
- extractViews(pool).then(listToRecord),
368
+ extractViews(pool, retryOptions).then(listToRecord),
357
369
  extractForeignDataWrappers(pool).then(listToRecord),
358
370
  extractServers(pool).then(listToRecord),
359
371
  extractUserMappings(pool).then(listToRecord),
@@ -97,19 +97,154 @@ describe("expandReplaceDependencies", () => {
97
97
  expect(result.replacedTableIds.size).toBe(0);
98
98
  });
99
99
 
100
+ test("promotes surviving dependent view when its referenced table is dropped without a same-name create", async () => {
101
+ // Reproduces issue #228 case 3: ALTER TABLE users RENAME TO members.
102
+ // pg-delta sees `users` as drop-only and `members` as create-only — the
103
+ // stableIds differ, so neither is in the createdIds∩droppedIds replace
104
+ // root set. The dependent view `user_count` exists in both catalogs
105
+ // (its definition was rewritten to FROM members in branch). Without
106
+ // expansion, DROP TABLE users would fail because user_count still
107
+ // references it. The expander must seed the drop-only table as a root
108
+ // so the surviving dependent gets promoted to DROP+CREATE.
109
+ const baseline = await createEmptyCatalog(170000, "postgres");
110
+ const usersTable = new Table({
111
+ schema: "public",
112
+ name: "users",
113
+ persistence: "p",
114
+ row_security: false,
115
+ force_row_security: false,
116
+ has_indexes: false,
117
+ has_rules: false,
118
+ has_triggers: false,
119
+ has_subclasses: false,
120
+ is_populated: true,
121
+ replica_identity: "d",
122
+ is_partition: false,
123
+ options: null,
124
+ partition_bound: null,
125
+ partition_by: null,
126
+ owner: "postgres",
127
+ comment: null,
128
+ parent_schema: null,
129
+ parent_name: null,
130
+ columns: [
131
+ {
132
+ name: "id",
133
+ position: 1,
134
+ data_type: "integer",
135
+ data_type_str: "integer",
136
+ is_custom_type: false,
137
+ custom_type_type: null,
138
+ custom_type_category: null,
139
+ custom_type_schema: null,
140
+ custom_type_name: null,
141
+ not_null: true,
142
+ is_identity: false,
143
+ is_identity_always: false,
144
+ is_generated: false,
145
+ collation: null,
146
+ default: null,
147
+ comment: null,
148
+ },
149
+ ],
150
+ privileges: [],
151
+ });
152
+ const mainView = new View({
153
+ schema: "public",
154
+ name: "user_count",
155
+ owner: "postgres",
156
+ definition: " SELECT count(*) AS n FROM public.users;",
157
+ row_security: false,
158
+ force_row_security: false,
159
+ has_indexes: false,
160
+ has_rules: false,
161
+ has_triggers: false,
162
+ has_subclasses: false,
163
+ is_populated: true,
164
+ replica_identity: "d",
165
+ is_partition: false,
166
+ partition_bound: null,
167
+ comment: null,
168
+ columns: [
169
+ {
170
+ name: "n",
171
+ position: 1,
172
+ data_type: "bigint",
173
+ data_type_str: "bigint",
174
+ is_custom_type: false,
175
+ custom_type_type: null,
176
+ custom_type_category: null,
177
+ custom_type_schema: null,
178
+ custom_type_name: null,
179
+ not_null: false,
180
+ is_identity: false,
181
+ is_identity_always: false,
182
+ is_generated: false,
183
+ collation: null,
184
+ default: null,
185
+ comment: null,
186
+ },
187
+ ],
188
+ options: null,
189
+ privileges: [],
190
+ });
191
+ const branchView = new View({
192
+ ...mainView,
193
+ definition: " SELECT count(*) AS n FROM public.members;",
194
+ });
195
+
196
+ const mainCatalog = new Catalog({
197
+ ...baseline,
198
+ tables: { [usersTable.stableId]: usersTable },
199
+ views: { [mainView.stableId]: mainView },
200
+ depends: [
201
+ {
202
+ dependent_stable_id: mainView.stableId,
203
+ referenced_stable_id: usersTable.stableId,
204
+ deptype: "n",
205
+ },
206
+ ],
207
+ });
208
+ const branchCatalog = new Catalog({
209
+ ...baseline,
210
+ views: { [branchView.stableId]: branchView },
211
+ });
212
+
213
+ // Simulated planner output: DropTable(users) + CreateView orReplace(user_count).
214
+ // The surviving view appears only as a "create" (CREATE OR REPLACE VIEW),
215
+ // never as a drop, so DROP TABLE users would fail without expansion.
216
+ const changes: Change[] = [
217
+ new DropTable({ table: usersTable }),
218
+ new CreateView({ view: branchView, orReplace: true }),
219
+ ];
220
+ const result = expandReplaceDependencies({
221
+ changes,
222
+ mainCatalog,
223
+ branchCatalog,
224
+ });
225
+
226
+ // The view's surviving CREATE OR REPLACE remains, AND a DropView is
227
+ // injected so the drop phase removes the view before the table.
228
+ expect(result.changes.some((c) => c instanceof DropView)).toBe(true);
229
+ });
230
+
100
231
  test("does not replace the owning table for an owned sequence recreation", async () => {
101
232
  const baseline = await createEmptyCatalog(170000, "postgres");
233
+ // Use `persistence` (UNLOGGED → LOGGED) to trigger the
234
+ // non-alterable replace path: it's the only field still in
235
+ // NON_ALTERABLE_FIELDS. `data_type` was previously in that list
236
+ // but is now alterable in place via ALTER SEQUENCE ... AS <type>.
102
237
  const mainSequence = new Sequence({
103
238
  schema: "public",
104
239
  name: "user_id_seq",
105
- data_type: "integer",
240
+ data_type: "bigint",
106
241
  start_value: 1,
107
242
  minimum_value: 1n,
108
- maximum_value: 2147483647n,
243
+ maximum_value: 9223372036854775807n,
109
244
  increment: 1,
110
245
  cycle_option: false,
111
246
  cache_size: 1,
112
- persistence: "p",
247
+ persistence: "u",
113
248
  owned_by_schema: "public",
114
249
  owned_by_table: "users",
115
250
  owned_by_column: "id",
@@ -119,8 +254,7 @@ describe("expandReplaceDependencies", () => {
119
254
  });
120
255
  const branchSequence = new Sequence({
121
256
  ...mainSequence,
122
- data_type: "bigint",
123
- maximum_value: 9223372036854775807n,
257
+ persistence: "p",
124
258
  });
125
259
  const usersTable = new Table({
126
260
  schema: "public",
@@ -129,6 +129,30 @@ export function expandReplaceDependencies({
129
129
  }
130
130
  }
131
131
 
132
+ // Drop-only objects (no matching create — typically a renamed-away table or
133
+ // type) are also expansion roots: anything in main that depends on them via
134
+ // pg_depend must drop before the parent does. Without this seed, a renamed
135
+ // table whose dependent view stays in the branch catalog (with an updated
136
+ // definition that no longer references the old name) would still try to
137
+ // run DROP TABLE old_name while old_name is referenced by the view, which
138
+ // PostgreSQL refuses without CASCADE. The walk below promotes the surviving
139
+ // dependent to DROP+CREATE so its drop is sequenced before the parent drop.
140
+ for (const id of droppedIds) {
141
+ if (createdIds.has(id)) continue;
142
+ if (replaceRoots.has(id)) continue;
143
+ // Only seed for object kinds that can have catalog dependents we know
144
+ // how to recreate via buildReplaceChanges.
145
+ if (
146
+ id.startsWith("table:") ||
147
+ id.startsWith("view:") ||
148
+ id.startsWith("materializedView:") ||
149
+ id.startsWith("type:") ||
150
+ id.startsWith("domain:")
151
+ ) {
152
+ replaceRoots.add(id);
153
+ }
154
+ }
155
+
132
156
  if (replaceRoots.size === 0) {
133
157
  return {
134
158
  changes,
@@ -0,0 +1,143 @@
1
+ import { afterEach, describe, expect, test } from "bun:test";
2
+ import {
3
+ extractWithDefinitionRetry,
4
+ resolveExtractRetries,
5
+ } from "./extract-with-retry.ts";
6
+
7
+ type Row = { id: string; definition: string | null };
8
+
9
+ const hasNullDefinition = (r: Row) => r.definition === null;
10
+
11
+ describe("resolveExtractRetries", () => {
12
+ const originalEnv = process.env.PGDELTA_EXTRACT_RETRIES;
13
+ afterEach(() => {
14
+ if (originalEnv === undefined) {
15
+ process.env.PGDELTA_EXTRACT_RETRIES = undefined;
16
+ delete process.env.PGDELTA_EXTRACT_RETRIES;
17
+ } else {
18
+ process.env.PGDELTA_EXTRACT_RETRIES = originalEnv;
19
+ }
20
+ });
21
+
22
+ test("defaults to 1 when option and env are unset", () => {
23
+ delete process.env.PGDELTA_EXTRACT_RETRIES;
24
+ expect(resolveExtractRetries()).toBe(1);
25
+ });
26
+
27
+ test("uses option when provided", () => {
28
+ process.env.PGDELTA_EXTRACT_RETRIES = "5";
29
+ expect(resolveExtractRetries(0)).toBe(0);
30
+ expect(resolveExtractRetries(1)).toBe(1);
31
+ expect(resolveExtractRetries(7)).toBe(7);
32
+ });
33
+
34
+ test("falls back to env when option is undefined", () => {
35
+ process.env.PGDELTA_EXTRACT_RETRIES = "4";
36
+ expect(resolveExtractRetries()).toBe(4);
37
+ });
38
+
39
+ test("clamps negative values to 0", () => {
40
+ delete process.env.PGDELTA_EXTRACT_RETRIES;
41
+ expect(resolveExtractRetries(-3)).toBe(0);
42
+ process.env.PGDELTA_EXTRACT_RETRIES = "-9";
43
+ expect(resolveExtractRetries()).toBe(0);
44
+ });
45
+
46
+ test("ignores non-numeric env values", () => {
47
+ process.env.PGDELTA_EXTRACT_RETRIES = "not-a-number";
48
+ expect(resolveExtractRetries()).toBe(1);
49
+ });
50
+
51
+ test("ignores empty env string", () => {
52
+ process.env.PGDELTA_EXTRACT_RETRIES = "";
53
+ expect(resolveExtractRetries()).toBe(1);
54
+ });
55
+ });
56
+
57
+ describe("extractWithDefinitionRetry", () => {
58
+ test("returns first attempt when no row has null definition", async () => {
59
+ let attempts = 0;
60
+ const rows = await extractWithDefinitionRetry<Row>({
61
+ label: "test",
62
+ query: async () => {
63
+ attempts++;
64
+ return [{ id: "a", definition: "OK" }];
65
+ },
66
+ hasNullDefinition,
67
+ options: { retries: 2, backoffMs: 0 },
68
+ });
69
+ expect(attempts).toBe(1);
70
+ expect(rows).toEqual([{ id: "a", definition: "OK" }]);
71
+ });
72
+
73
+ test("retries when definition is null and succeeds on attempt 2", async () => {
74
+ let attempts = 0;
75
+ const rows = await extractWithDefinitionRetry<Row>({
76
+ label: "test",
77
+ query: async () => {
78
+ attempts++;
79
+ if (attempts === 1) {
80
+ return [
81
+ { id: "a", definition: "OK" },
82
+ { id: "b", definition: null },
83
+ ];
84
+ }
85
+ return [{ id: "a", definition: "OK" }];
86
+ },
87
+ hasNullDefinition,
88
+ options: { retries: 2, backoffMs: 0 },
89
+ });
90
+ expect(attempts).toBe(2);
91
+ expect(rows).toEqual([{ id: "a", definition: "OK" }]);
92
+ });
93
+
94
+ test("returns last-attempt rows (with offenders) once retries are exhausted", async () => {
95
+ let attempts = 0;
96
+ const rows = await extractWithDefinitionRetry<Row>({
97
+ label: "test",
98
+ query: async () => {
99
+ attempts++;
100
+ return [
101
+ { id: "a", definition: "OK" },
102
+ { id: "b", definition: null },
103
+ ];
104
+ },
105
+ hasNullDefinition,
106
+ options: { retries: 2, backoffMs: 0 },
107
+ });
108
+ expect(attempts).toBe(3);
109
+ expect(rows).toEqual([
110
+ { id: "a", definition: "OK" },
111
+ { id: "b", definition: null },
112
+ ]);
113
+ });
114
+
115
+ test("retries: 0 disables retrying entirely", async () => {
116
+ let attempts = 0;
117
+ const rows = await extractWithDefinitionRetry<Row>({
118
+ label: "test",
119
+ query: async () => {
120
+ attempts++;
121
+ return [{ id: "b", definition: null }];
122
+ },
123
+ hasNullDefinition,
124
+ options: { retries: 0, backoffMs: 0 },
125
+ });
126
+ expect(attempts).toBe(1);
127
+ expect(rows).toEqual([{ id: "b", definition: null }]);
128
+ });
129
+
130
+ test("retries: 5 attempts up to 6 times before giving up", async () => {
131
+ let attempts = 0;
132
+ await extractWithDefinitionRetry<Row>({
133
+ label: "test",
134
+ query: async () => {
135
+ attempts++;
136
+ return [{ id: "b", definition: null }];
137
+ },
138
+ hasNullDefinition,
139
+ options: { retries: 5, backoffMs: 0 },
140
+ });
141
+ expect(attempts).toBe(6);
142
+ });
143
+ });
@@ -0,0 +1,87 @@
1
+ import debug from "debug";
2
+
3
+ const log = debug("pg-delta:extract");
4
+
5
+ const DEFAULT_RETRIES = 1;
6
+ const DEFAULT_BACKOFF_MS = 50;
7
+
8
+ export interface ExtractRetryOptions {
9
+ /**
10
+ * Number of retry attempts to make when a `pg_get_*def()` call returns NULL
11
+ * for at least one row. Total attempts is `retries + 1`. Negative values are
12
+ * clamped to 0. When this option is undefined the value is read from the
13
+ * `PGDELTA_EXTRACT_RETRIES` environment variable, falling back to a default
14
+ * of 1 (i.e. the first attempt plus one retry, 2 attempts total).
15
+ */
16
+ retries?: number;
17
+ /**
18
+ * Delay between retry attempts in milliseconds; the actual wait is
19
+ * `backoffMs * attemptNumber` (linear). Defaults to 50. Set to 0 in tests.
20
+ */
21
+ backoffMs?: number;
22
+ }
23
+
24
+ export function resolveExtractRetries(option?: number): number {
25
+ if (typeof option === "number" && Number.isFinite(option)) {
26
+ return Math.max(0, Math.floor(option));
27
+ }
28
+ const envVal = process.env.PGDELTA_EXTRACT_RETRIES;
29
+ if (envVal !== undefined && envVal !== "") {
30
+ const n = Number(envVal);
31
+ if (Number.isFinite(n)) return Math.max(0, Math.floor(n));
32
+ }
33
+ return DEFAULT_RETRIES;
34
+ }
35
+
36
+ const sleep = (ms: number) =>
37
+ ms > 0 ? new Promise<void>((r) => setTimeout(r, ms)) : Promise.resolve();
38
+
39
+ /**
40
+ * Runs `query()` up to `retries + 1` times, retrying as long as at least one
41
+ * row in the result satisfies `hasNullDefinition`. The retry exists because
42
+ * `pg_get_<x>def()` can return NULL transiently when the underlying catalog
43
+ * row is dropped concurrently or the catalog state is in flux; in practice a
44
+ * second attempt either no longer sees the dropped row or succeeds in
45
+ * resolving the definition.
46
+ *
47
+ * Returns the rows from the first attempt with no offenders, or — once
48
+ * retries are exhausted — the rows from the final attempt (still containing
49
+ * offenders). The caller is responsible for the final filter so this helper
50
+ * works for both flat schemas (definition on the row) and nested schemas
51
+ * (definition on a child collection, e.g. table constraints).
52
+ */
53
+ export async function extractWithDefinitionRetry<TRow>(params: {
54
+ label: string;
55
+ query: () => Promise<TRow[]>;
56
+ hasNullDefinition: (row: TRow) => boolean;
57
+ options?: ExtractRetryOptions;
58
+ }): Promise<TRow[]> {
59
+ const retries = resolveExtractRetries(params.options?.retries);
60
+ const backoffMs = params.options?.backoffMs ?? DEFAULT_BACKOFF_MS;
61
+ const maxAttempts = retries + 1;
62
+
63
+ let rows: TRow[] = [];
64
+ for (let attempt = 1; attempt <= maxAttempts; attempt++) {
65
+ rows = await params.query();
66
+ const offenders = rows.filter(params.hasNullDefinition).length;
67
+ if (offenders === 0) return rows;
68
+ if (attempt < maxAttempts) {
69
+ log(
70
+ "%s: pg_get_*def() returned NULL for %d row(s) on attempt %d/%d; retrying",
71
+ params.label,
72
+ offenders,
73
+ attempt,
74
+ maxAttempts,
75
+ );
76
+ await sleep(backoffMs * attempt);
77
+ } else {
78
+ log(
79
+ "%s: pg_get_*def() returned NULL for %d row(s) after %d attempt(s); skipping",
80
+ params.label,
81
+ offenders,
82
+ maxAttempts,
83
+ );
84
+ }
85
+ }
86
+ return rows;
87
+ }
@@ -104,7 +104,6 @@ export function diffIndexes(
104
104
  "nulls_not_distinct",
105
105
  "immediate",
106
106
  "is_clustered",
107
- "is_replica_identity",
108
107
  "column_collations",
109
108
  "operator_classes",
110
109
  "column_options",
@@ -36,8 +36,19 @@ const baseRow = {
36
36
  const mockPool = (rows: unknown[]): Pool =>
37
37
  ({ query: async () => ({ rows }) }) as unknown as Pool;
38
38
 
39
+ const mockPoolSequence = (...attempts: unknown[][]): Pool => {
40
+ let i = 0;
41
+ return {
42
+ query: async () => ({
43
+ rows: attempts[Math.min(i++, attempts.length - 1)],
44
+ }),
45
+ } as unknown as Pool;
46
+ };
47
+
48
+ const NO_BACKOFF = { backoffMs: 0 } as const;
49
+
39
50
  describe("extractIndexes", () => {
40
- test("skips rows where pg_get_indexdef returned NULL", async () => {
51
+ test("skips rows where pg_get_indexdef returned NULL after exhausting retries", async () => {
41
52
  const indexes = await extractIndexes(
42
53
  mockPool([
43
54
  {
@@ -47,6 +58,7 @@ describe("extractIndexes", () => {
47
58
  },
48
59
  { ...baseRow, name: '"orphan_idx"', definition: null },
49
60
  ]),
61
+ NO_BACKOFF,
50
62
  );
51
63
 
52
64
  expect(indexes).toHaveLength(1);
@@ -59,6 +71,7 @@ describe("extractIndexes", () => {
59
71
  await expect(
60
72
  extractIndexes(
61
73
  mockPool([{ ...baseRow, name: '"orphan"', definition: null }]),
74
+ NO_BACKOFF,
62
75
  ),
63
76
  ).resolves.toEqual([]);
64
77
  });
@@ -77,7 +90,30 @@ describe("extractIndexes", () => {
77
90
  definition: "CREATE INDEX b ON users (id)",
78
91
  },
79
92
  ]),
93
+ NO_BACKOFF,
80
94
  );
81
95
  expect(indexes.map((i) => i.name)).toEqual(['"a"', '"b"']);
82
96
  });
97
+
98
+ test("recovers when pg_get_indexdef is NULL on first attempt but resolved on retry", async () => {
99
+ const indexes = await extractIndexes(
100
+ mockPoolSequence(
101
+ // attempt 1: definition is NULL (transient race)
102
+ [{ ...baseRow, name: '"racy_idx"', definition: null }],
103
+ // attempt 2: catalog scan no longer sees the dropped row, or
104
+ // pg_get_indexdef successfully resolves the definition
105
+ [
106
+ {
107
+ ...baseRow,
108
+ name: '"racy_idx"',
109
+ definition: "CREATE INDEX racy_idx ON users (id)",
110
+ },
111
+ ],
112
+ ),
113
+ { retries: 2, backoffMs: 0 },
114
+ );
115
+ expect(indexes).toHaveLength(1);
116
+ expect(indexes[0]?.name).toBe('"racy_idx"');
117
+ expect(indexes[0]?.definition).toBe("CREATE INDEX racy_idx ON users (id)");
118
+ });
83
119
  });