@supabase/pg-delta 1.0.0-alpha.21 → 1.0.0-alpha.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. package/dist/core/catalog.diff.js +4 -3
  2. package/dist/core/catalog.model.d.ts +8 -1
  3. package/dist/core/catalog.model.js +9 -8
  4. package/dist/core/expand-replace-dependencies.js +23 -0
  5. package/dist/core/objects/extract-with-retry.d.ts +36 -0
  6. package/dist/core/objects/extract-with-retry.js +51 -0
  7. package/dist/core/objects/index/index.diff.js +0 -1
  8. package/dist/core/objects/index/index.model.d.ts +2 -3
  9. package/dist/core/objects/index/index.model.js +17 -6
  10. package/dist/core/objects/materialized-view/materialized-view.model.d.ts +2 -1
  11. package/dist/core/objects/materialized-view/materialized-view.model.js +20 -4
  12. package/dist/core/objects/procedure/procedure.model.d.ts +2 -1
  13. package/dist/core/objects/procedure/procedure.model.js +20 -4
  14. package/dist/core/objects/rls-policy/rls-policy.diff.js +13 -1
  15. package/dist/core/objects/rule/rule.model.d.ts +2 -1
  16. package/dist/core/objects/rule/rule.model.js +20 -3
  17. package/dist/core/objects/sequence/sequence.diff.d.ts +2 -1
  18. package/dist/core/objects/sequence/sequence.diff.js +28 -4
  19. package/dist/core/objects/table/changes/table.alter.d.ts +12 -1
  20. package/dist/core/objects/table/changes/table.alter.js +20 -2
  21. package/dist/core/objects/table/table.diff.js +19 -15
  22. package/dist/core/objects/table/table.model.d.ts +6 -1
  23. package/dist/core/objects/table/table.model.js +40 -5
  24. package/dist/core/objects/trigger/trigger.model.d.ts +2 -1
  25. package/dist/core/objects/trigger/trigger.model.js +20 -4
  26. package/dist/core/objects/utils.d.ts +1 -0
  27. package/dist/core/objects/utils.js +3 -0
  28. package/dist/core/objects/view/view.model.d.ts +2 -1
  29. package/dist/core/objects/view/view.model.js +20 -4
  30. package/dist/core/plan/create.js +3 -1
  31. package/dist/core/plan/types.d.ts +8 -0
  32. package/dist/core/{post-diff-cycle-breaking.d.ts → post-diff-normalization.d.ts} +8 -1
  33. package/dist/core/post-diff-normalization.js +202 -0
  34. package/dist/core/sort/cycle-breakers.js +1 -1
  35. package/dist/core/sort/utils.d.ts +10 -0
  36. package/dist/core/sort/utils.js +28 -0
  37. package/package.json +1 -1
  38. package/src/core/catalog.diff.ts +4 -2
  39. package/src/core/catalog.model.ts +20 -8
  40. package/src/core/expand-replace-dependencies.test.ts +131 -0
  41. package/src/core/expand-replace-dependencies.ts +24 -0
  42. package/src/core/objects/extract-with-retry.test.ts +143 -0
  43. package/src/core/objects/extract-with-retry.ts +87 -0
  44. package/src/core/objects/index/index.diff.ts +0 -1
  45. package/src/core/objects/index/index.model.test.ts +37 -1
  46. package/src/core/objects/index/index.model.ts +25 -6
  47. package/src/core/objects/materialized-view/materialized-view.model.test.ts +93 -0
  48. package/src/core/objects/materialized-view/materialized-view.model.ts +27 -4
  49. package/src/core/objects/procedure/procedure.model.test.ts +117 -0
  50. package/src/core/objects/procedure/procedure.model.ts +28 -5
  51. package/src/core/objects/rls-policy/rls-policy.diff.ts +19 -1
  52. package/src/core/objects/rule/rule.model.test.ts +99 -0
  53. package/src/core/objects/rule/rule.model.ts +28 -4
  54. package/src/core/objects/sequence/sequence.diff.test.ts +87 -0
  55. package/src/core/objects/sequence/sequence.diff.ts +31 -6
  56. package/src/core/objects/table/changes/table.alter.test.ts +13 -21
  57. package/src/core/objects/table/changes/table.alter.ts +30 -3
  58. package/src/core/objects/table/table.diff.ts +24 -19
  59. package/src/core/objects/table/table.model.test.ts +209 -0
  60. package/src/core/objects/table/table.model.ts +52 -7
  61. package/src/core/objects/trigger/trigger.model.test.ts +113 -0
  62. package/src/core/objects/trigger/trigger.model.ts +28 -5
  63. package/src/core/objects/utils.ts +3 -0
  64. package/src/core/objects/view/view.model.test.ts +90 -0
  65. package/src/core/objects/view/view.model.ts +28 -5
  66. package/src/core/plan/create.ts +3 -1
  67. package/src/core/plan/types.ts +8 -0
  68. package/src/core/{post-diff-cycle-breaking.test.ts → post-diff-normalization.test.ts} +168 -4
  69. package/src/core/post-diff-normalization.ts +260 -0
  70. package/src/core/sort/cycle-breakers.ts +1 -1
  71. package/src/core/sort/utils.ts +38 -0
  72. package/dist/core/post-diff-cycle-breaking.js +0 -100
  73. package/src/core/post-diff-cycle-breaking.ts +0 -138
@@ -71,6 +71,9 @@ export const stableId = {
71
71
  constraint(schema: string, table: string, constraint: string) {
72
72
  return `constraint:${schema}.${table}.${constraint}` as const;
73
73
  },
74
+ index(schema: string, table: string, indexName: string) {
75
+ return `index:${schema}.${table}.${indexName}` as const;
76
+ },
74
77
  comment(objectStableId: string) {
75
78
  return `comment:${objectStableId}` as const;
76
79
  },
@@ -0,0 +1,90 @@
1
+ import { describe, expect, test } from "bun:test";
2
+ import type { Pool } from "pg";
3
+ import { extractViews, View } from "./view.model.ts";
4
+
5
+ const baseRow = {
6
+ schema: "public",
7
+ row_security: false,
8
+ force_row_security: false,
9
+ has_indexes: false,
10
+ has_rules: false,
11
+ has_triggers: false,
12
+ has_subclasses: false,
13
+ is_populated: true,
14
+ replica_identity: "d" as const,
15
+ is_partition: false,
16
+ options: null,
17
+ partition_bound: null,
18
+ owner: "postgres",
19
+ comment: null,
20
+ columns: [],
21
+ privileges: [],
22
+ };
23
+
24
+ const mockPool = (rows: unknown[]): Pool =>
25
+ ({ query: async () => ({ rows }) }) as unknown as Pool;
26
+
27
+ const mockPoolSequence = (...attempts: unknown[][]): Pool => {
28
+ let i = 0;
29
+ return {
30
+ query: async () => ({
31
+ rows: attempts[Math.min(i++, attempts.length - 1)],
32
+ }),
33
+ } as unknown as Pool;
34
+ };
35
+
36
+ const NO_BACKOFF = { backoffMs: 0 } as const;
37
+
38
+ describe("extractViews", () => {
39
+ test("skips rows where pg_get_viewdef returned NULL after exhausting retries", async () => {
40
+ const views = await extractViews(
41
+ mockPool([
42
+ {
43
+ ...baseRow,
44
+ name: '"good_view"',
45
+ definition: "SELECT 1",
46
+ },
47
+ { ...baseRow, name: '"orphan_view"', definition: null },
48
+ ]),
49
+ NO_BACKOFF,
50
+ );
51
+
52
+ expect(views).toHaveLength(1);
53
+ expect(views[0]).toBeInstanceOf(View);
54
+ expect(views[0]?.name).toBe('"good_view"');
55
+ expect(views[0]?.definition).toBe("SELECT 1");
56
+ });
57
+
58
+ test("does not throw ZodError when the only row has a null definition", async () => {
59
+ await expect(
60
+ extractViews(
61
+ mockPool([{ ...baseRow, name: '"orphan"', definition: null }]),
62
+ NO_BACKOFF,
63
+ ),
64
+ ).resolves.toEqual([]);
65
+ });
66
+
67
+ test("returns all views when every row has a valid definition", async () => {
68
+ const views = await extractViews(
69
+ mockPool([
70
+ { ...baseRow, name: '"a"', definition: "SELECT 1" },
71
+ { ...baseRow, name: '"b"', definition: "SELECT 2" },
72
+ ]),
73
+ NO_BACKOFF,
74
+ );
75
+ expect(views.map((v) => v.name)).toEqual(['"a"', '"b"']);
76
+ });
77
+
78
+ test("recovers when pg_get_viewdef is NULL on first attempt but resolved on retry", async () => {
79
+ const views = await extractViews(
80
+ mockPoolSequence(
81
+ [{ ...baseRow, name: '"racy_view"', definition: null }],
82
+ [{ ...baseRow, name: '"racy_view"', definition: "SELECT 42" }],
83
+ ),
84
+ { retries: 2, backoffMs: 0 },
85
+ );
86
+ expect(views).toHaveLength(1);
87
+ expect(views[0]?.name).toBe('"racy_view"');
88
+ expect(views[0]?.definition).toBe("SELECT 42");
89
+ });
90
+ });
@@ -11,6 +11,10 @@ import {
11
11
  type PrivilegeProps,
12
12
  privilegePropsSchema,
13
13
  } from "../base.privilege-diff.ts";
14
+ import {
15
+ type ExtractRetryOptions,
16
+ extractWithDefinitionRetry,
17
+ } from "../extract-with-retry.ts";
14
18
  import { ReplicaIdentitySchema } from "../table/table.model.ts";
15
19
 
16
20
  const viewPropsSchema = z.object({
@@ -34,6 +38,15 @@ const viewPropsSchema = z.object({
34
38
  privileges: z.array(privilegePropsSchema),
35
39
  });
36
40
 
41
+ // pg_get_viewdef(oid) can return NULL when the underlying view (or its
42
+ // pg_rewrite row) is dropped between catalog scan and resolution, or under
43
+ // transient catalog state during recovery. An unreadable view cannot be
44
+ // diffed, so we accept NULL here and filter the row out at extraction time
45
+ // rather than crashing the whole catalog parse with a ZodError.
46
+ const viewRowSchema = viewPropsSchema.extend({
47
+ definition: z.string().nullable(),
48
+ });
49
+
37
50
  type ViewPrivilegeProps = PrivilegeProps;
38
51
  export type ViewProps = z.infer<typeof viewPropsSchema>;
39
52
 
@@ -126,8 +139,16 @@ export class View extends BasePgModel implements TableLikeObject {
126
139
  }
127
140
  }
128
141
 
129
- export async function extractViews(pool: Pool): Promise<View[]> {
130
- const { rows: viewRows } = await pool.query<ViewProps>(sql`
142
+ export async function extractViews(
143
+ pool: Pool,
144
+ options?: ExtractRetryOptions,
145
+ ): Promise<View[]> {
146
+ const viewRows = await extractWithDefinitionRetry({
147
+ label: "views",
148
+ options,
149
+ hasNullDefinition: (row) => row.definition === null,
150
+ query: async () => {
151
+ const result = await pool.query<ViewProps>(sql`
131
152
  with extension_oids as (
132
153
  select
133
154
  objid
@@ -254,9 +275,11 @@ group by
254
275
  order by
255
276
  v.schema, v.name
256
277
  `);
257
- // Validate and parse each row using the Zod schema
258
- const validatedRows = viewRows.map((row: unknown) =>
259
- viewPropsSchema.parse(row),
278
+ return result.rows.map((row: unknown) => viewRowSchema.parse(row));
279
+ },
280
+ });
281
+ const validatedRows = viewRows.filter(
282
+ (row): row is ViewProps => row.definition !== null,
260
283
  );
261
284
  return validatedRows.map((row: ViewProps) => new View(row));
262
285
  }
@@ -100,7 +100,9 @@ export async function createPlan(
100
100
  }
101
101
  const resolved = await resolvePool(input, label);
102
102
  pools.push(resolved);
103
- return extractCatalog(resolved.pool);
103
+ return extractCatalog(resolved.pool, {
104
+ extractRetries: options.extractRetries,
105
+ });
104
106
  };
105
107
 
106
108
  const pools: Array<{ pool: Pool; shouldClose: boolean }> = [];
@@ -165,4 +165,12 @@ export interface CreatePlanOptions {
165
165
  * the output must be self-contained and not rely on statement execution order.
166
166
  */
167
167
  skipDefaultPrivilegeSubtraction?: boolean;
168
+ /**
169
+ * Number of retry attempts for catalog extractors when `pg_get_*def()`
170
+ * returns NULL for at least one row (a transient race with concurrent DDL).
171
+ * Total attempts is `extractRetries + 1`. When undefined, the value is read
172
+ * from the `PGDELTA_EXTRACT_RETRIES` environment variable, falling back to
173
+ * a default of 1 (i.e. the first attempt plus one retry, 2 attempts total).
174
+ */
175
+ extractRetries?: number;
168
176
  }
@@ -1,5 +1,8 @@
1
1
  import { describe, expect, test } from "bun:test";
2
2
  import type { Change } from "./change.types.ts";
3
+ import { CreateIndex } from "./objects/index/changes/index.create.ts";
4
+ import { DropIndex } from "./objects/index/changes/index.drop.ts";
5
+ import { Index, type IndexProps } from "./objects/index/index.model.ts";
3
6
  import {
4
7
  AlterTableAddConstraint,
5
8
  AlterTableChangeOwner,
@@ -14,7 +17,7 @@ import { CreateTable } from "./objects/table/changes/table.create.ts";
14
17
  import { DropTable } from "./objects/table/changes/table.drop.ts";
15
18
  import { GrantTablePrivileges } from "./objects/table/changes/table.privilege.ts";
16
19
  import { Table } from "./objects/table/table.model.ts";
17
- import { normalizePostDiffCycles } from "./post-diff-cycle-breaking.ts";
20
+ import { normalizePostDiffChanges } from "./post-diff-normalization.ts";
18
21
 
19
22
  const baseTableProps = {
20
23
  schema: "public",
@@ -59,7 +62,7 @@ function integerColumn(name: string, position: number) {
59
62
  };
60
63
  }
61
64
 
62
- describe("normalizePostDiffCycles", () => {
65
+ describe("normalizePostDiffChanges", () => {
63
66
  test("prunes same-table drop-column and drop-constraint ALTERs for replaced tables only", async () => {
64
67
  const mainChildren = new Table({
65
68
  ...baseTableProps,
@@ -149,7 +152,7 @@ describe("normalizePostDiffCycles", () => {
149
152
  preExistingGrant,
150
153
  ];
151
154
 
152
- const normalized = normalizePostDiffCycles({
155
+ const normalized = normalizePostDiffChanges({
153
156
  changes,
154
157
  replacedTableIds: new Set([mainChildren.stableId]),
155
158
  });
@@ -273,7 +276,7 @@ describe("normalizePostDiffCycles", () => {
273
276
  expansionComment,
274
277
  ];
275
278
 
276
- const normalized = normalizePostDiffCycles({
279
+ const normalized = normalizePostDiffChanges({
277
280
  changes,
278
281
  replacedTableIds: new Set([branchChildren.stableId]),
279
282
  });
@@ -300,4 +303,165 @@ describe("normalizePostDiffCycles", () => {
300
303
  ),
301
304
  ).toHaveLength(1);
302
305
  });
306
+
307
+ describe("restoreReplicaIdentityAfterIndexReplace", () => {
308
+ const baseIndexProps: IndexProps = {
309
+ schema: "public",
310
+ table_name: "replicated",
311
+ name: "tenant_idx",
312
+ storage_params: [],
313
+ statistics_target: [],
314
+ index_type: "btree",
315
+ tablespace: null,
316
+ is_unique: true,
317
+ is_primary: false,
318
+ is_exclusion: false,
319
+ nulls_not_distinct: false,
320
+ immediate: true,
321
+ is_clustered: false,
322
+ is_replica_identity: true,
323
+ key_columns: [],
324
+ column_collations: [],
325
+ operator_classes: [],
326
+ column_options: [],
327
+ index_expressions: null,
328
+ partial_predicate: null,
329
+ table_relkind: "r",
330
+ is_owned_by_constraint: false,
331
+ is_partitioned_index: false,
332
+ is_index_partition: false,
333
+ parent_index_name: null,
334
+ definition: "CREATE UNIQUE INDEX tenant_idx ON public.replicated (a)",
335
+ comment: null,
336
+ owner: "postgres",
337
+ };
338
+
339
+ function makeBranchTable(replicaIdentityIndex: string | null) {
340
+ return new Table({
341
+ ...baseTableProps,
342
+ name: "replicated",
343
+ replica_identity: replicaIdentityIndex ? "i" : "d",
344
+ replica_identity_index: replicaIdentityIndex,
345
+ columns: [
346
+ { ...integerColumn("id", 1), not_null: true },
347
+ integerColumn("a", 2),
348
+ ],
349
+ });
350
+ }
351
+
352
+ test("re-emits ALTER TABLE … REPLICA IDENTITY USING INDEX after a DropIndex+CreateIndex pair", () => {
353
+ const branchTable = makeBranchTable("tenant_idx");
354
+ const oldIndex = new Index(baseIndexProps);
355
+ const newIndex = new Index({
356
+ ...baseIndexProps,
357
+ definition:
358
+ "CREATE UNIQUE INDEX tenant_idx ON public.replicated (a, id)",
359
+ });
360
+
361
+ const changes: Change[] = [
362
+ new DropIndex({ index: oldIndex }),
363
+ new CreateIndex({ index: newIndex, indexableObject: branchTable }),
364
+ ];
365
+
366
+ const normalized = normalizePostDiffChanges({
367
+ changes,
368
+ branchTables: { [branchTable.stableId]: branchTable },
369
+ });
370
+
371
+ expect(normalized.map((c) => c.constructor.name)).toEqual([
372
+ "DropIndex",
373
+ "CreateIndex",
374
+ "AlterTableSetReplicaIdentity",
375
+ ]);
376
+
377
+ const inserted = normalized[2] as AlterTableSetReplicaIdentity;
378
+ expect(inserted.mode).toBe("i");
379
+ expect(inserted.indexName).toBe("tenant_idx");
380
+ expect(inserted.requires).toEqual([
381
+ "table:public.replicated",
382
+ "index:public.replicated.tenant_idx",
383
+ ]);
384
+ });
385
+
386
+ test("does not double-emit when diffTables already produced an AlterTableSetReplicaIdentity for the same table", () => {
387
+ const branchTable = makeBranchTable("tenant_idx");
388
+ const oldIndex = new Index(baseIndexProps);
389
+ const newIndex = new Index({
390
+ ...baseIndexProps,
391
+ definition:
392
+ "CREATE UNIQUE INDEX tenant_idx ON public.replicated (a, id)",
393
+ });
394
+
395
+ const changes: Change[] = [
396
+ new DropIndex({ index: oldIndex }),
397
+ new CreateIndex({ index: newIndex, indexableObject: branchTable }),
398
+ new AlterTableSetReplicaIdentity({
399
+ table: branchTable,
400
+ mode: "i",
401
+ indexName: "tenant_idx",
402
+ }),
403
+ ];
404
+
405
+ const normalized = normalizePostDiffChanges({
406
+ changes,
407
+ branchTables: { [branchTable.stableId]: branchTable },
408
+ });
409
+
410
+ expect(
411
+ normalized.filter((c) => c instanceof AlterTableSetReplicaIdentity),
412
+ ).toHaveLength(1);
413
+ });
414
+
415
+ test("ignores DropIndex without a matching CreateIndex (pure drop)", () => {
416
+ // Pure drop: the user removed the index entirely. The table.diff path is
417
+ // responsible for emitting the corresponding REPLICA IDENTITY DEFAULT.
418
+ // The post-diff pass must not synthesize a USING INDEX setter for an
419
+ // index that no longer exists.
420
+ const branchTable = makeBranchTable(null);
421
+ const oldIndex = new Index(baseIndexProps);
422
+
423
+ const changes: Change[] = [new DropIndex({ index: oldIndex })];
424
+
425
+ const normalized = normalizePostDiffChanges({
426
+ changes,
427
+ branchTables: { [branchTable.stableId]: branchTable },
428
+ });
429
+
430
+ expect(
431
+ normalized.filter((c) => c instanceof AlterTableSetReplicaIdentity),
432
+ ).toHaveLength(0);
433
+ });
434
+
435
+ test("ignores indexes that are not the table's replica identity", () => {
436
+ // The table has replica_identity = 'd', so even if some other index is
437
+ // being replaced, no setter should be injected.
438
+ const branchTable = makeBranchTable(null);
439
+ const otherIndex = new Index({
440
+ ...baseIndexProps,
441
+ name: "some_other_idx",
442
+ is_replica_identity: false,
443
+ definition: "CREATE INDEX some_other_idx ON public.replicated (a)",
444
+ });
445
+ const newOtherIndex = new Index({
446
+ ...baseIndexProps,
447
+ name: "some_other_idx",
448
+ is_replica_identity: false,
449
+ definition: "CREATE INDEX some_other_idx ON public.replicated (a, id)",
450
+ });
451
+
452
+ const changes: Change[] = [
453
+ new DropIndex({ index: otherIndex }),
454
+ new CreateIndex({ index: newOtherIndex, indexableObject: branchTable }),
455
+ ];
456
+
457
+ const normalized = normalizePostDiffChanges({
458
+ changes,
459
+ branchTables: { [branchTable.stableId]: branchTable },
460
+ });
461
+
462
+ expect(
463
+ normalized.filter((c) => c instanceof AlterTableSetReplicaIdentity),
464
+ ).toHaveLength(0);
465
+ });
466
+ });
303
467
  });
@@ -0,0 +1,260 @@
1
+ import type { Change } from "./change.types.ts";
2
+ import { CreateIndex } from "./objects/index/changes/index.create.ts";
3
+ import { DropIndex } from "./objects/index/changes/index.drop.ts";
4
+ import {
5
+ AlterTableAddConstraint,
6
+ AlterTableDropColumn,
7
+ AlterTableDropConstraint,
8
+ AlterTableSetReplicaIdentity,
9
+ AlterTableValidateConstraint,
10
+ } from "./objects/table/changes/table.alter.ts";
11
+ import { CreateCommentOnConstraint } from "./objects/table/changes/table.comment.ts";
12
+ import type { Table } from "./objects/table/table.model.ts";
13
+ import { stableId } from "./objects/utils.ts";
14
+
15
+ function constraintStableId(
16
+ table: { schema: string; name: string },
17
+ constraintName: string,
18
+ ) {
19
+ return stableId.constraint(table.schema, table.name, constraintName);
20
+ }
21
+
22
+ function isSupersededByTableReplacement(
23
+ change: Change,
24
+ replacedTableIds: ReadonlySet<string>,
25
+ ): boolean {
26
+ if (
27
+ !(change instanceof AlterTableDropColumn) &&
28
+ !(change instanceof AlterTableDropConstraint)
29
+ ) {
30
+ return false;
31
+ }
32
+ return replacedTableIds.has(change.table.stableId);
33
+ }
34
+
35
+ /**
36
+ * Drop earlier duplicates of `AlterTableAddConstraint` /
37
+ * `AlterTableValidateConstraint` / `CreateCommentOnConstraint` targeting
38
+ * replaced tables, keeping only the last occurrence of each
39
+ * `(changeType, table.stableId, constraint.name)`.
40
+ *
41
+ * When `expandReplaceDependencies()` promotes a table to a full
42
+ * `DropTable + CreateTable` pair, it also emits one
43
+ * `AlterTableAddConstraint` (plus optional `VALIDATE CONSTRAINT` /
44
+ * `COMMENT ON CONSTRAINT`) per branch constraint. If `diffTables()` already
45
+ * emitted the same change for a shape flip or a new constraint on that
46
+ * table, the plan ends up with two identical `ALTER TABLE ... ADD
47
+ * CONSTRAINT ...` statements and PostgreSQL fails at apply time with
48
+ * `constraint "..." for relation "..." already exists`. Because
49
+ * `expandReplaceDependencies()` appends its additions after the original
50
+ * `diffTables()` output, the last occurrence is the expansion's emission —
51
+ * keeping it preserves correctness while removing the duplicate.
52
+ */
53
+ function dropReplacedTableDuplicateConstraintChanges(
54
+ changes: Change[],
55
+ replacedTableIds: ReadonlySet<string>,
56
+ ): Change[] {
57
+ if (replacedTableIds.size === 0) return changes;
58
+
59
+ const keyFor = (change: Change): string | null => {
60
+ if (
61
+ !(change instanceof AlterTableAddConstraint) &&
62
+ !(change instanceof AlterTableValidateConstraint) &&
63
+ !(change instanceof CreateCommentOnConstraint)
64
+ ) {
65
+ return null;
66
+ }
67
+ if (!replacedTableIds.has(change.table.stableId)) return null;
68
+ const tag =
69
+ change instanceof AlterTableAddConstraint
70
+ ? "add"
71
+ : change instanceof AlterTableValidateConstraint
72
+ ? "validate"
73
+ : "comment";
74
+ return `${tag}:${constraintStableId(change.table, change.constraint.name)}`;
75
+ };
76
+
77
+ const seen = new Set<string>();
78
+ const reversedKept: Change[] = [];
79
+ let mutated = false;
80
+
81
+ // Walk backwards: the first encounter of each key corresponds to its LAST
82
+ // occurrence in the original order. `expandReplaceDependencies()` appends
83
+ // additions after the original changes, so "last wins" keeps the
84
+ // expansion's emission and drops the earlier diffTables duplicate.
85
+ for (let i = changes.length - 1; i >= 0; i--) {
86
+ const change = changes[i] as Change;
87
+ const key = keyFor(change);
88
+ if (key !== null) {
89
+ if (seen.has(key)) {
90
+ mutated = true;
91
+ continue;
92
+ }
93
+ seen.add(key);
94
+ }
95
+ reversedKept.push(change);
96
+ }
97
+
98
+ return mutated ? reversedKept.reverse() : changes;
99
+ }
100
+
101
+ /**
102
+ * Re-emit `ALTER TABLE ... REPLICA IDENTITY USING INDEX <idx>` after any
103
+ * `DropIndex(idx) + CreateIndex(idx)` pair where `idx` is the replica-identity
104
+ * index of a branch table.
105
+ *
106
+ * Background: PostgreSQL silently flips a table's `relreplident` to `'d'`
107
+ * (DEFAULT) when the index it points to is dropped. `CREATE INDEX` cannot
108
+ * restore the marker — only `ALTER TABLE ... REPLICA IDENTITY USING INDEX`
109
+ * can. When both main and branch carry `replica_identity = 'i'` pointing at
110
+ * the same index name, `diffTables()` emits no replica-identity change of its
111
+ * own, so the marker would be lost on apply.
112
+ *
113
+ * This is a whole-plan interaction: `diffTables()` cannot detect it without
114
+ * also looking at index changes. Per the "whole-plan interactions belong in
115
+ * post-diff normalization" rule in the package CLAUDE.md, the restoration
116
+ * lives here.
117
+ *
118
+ * Insertion is idempotent: if `diffTables()` already emitted the same
119
+ * `AlterTableSetReplicaIdentity` for this table (e.g. when the user is also
120
+ * switching the replica-identity index name in the same migration), no
121
+ * duplicate is added.
122
+ */
123
+ function restoreReplicaIdentityAfterIndexReplace(
124
+ changes: Change[],
125
+ branchTables: Record<string, Table>,
126
+ ): Change[] {
127
+ // Build the index-stable-id → owning-table map from branch state. Only
128
+ // tables in 'i' mode contribute, and only those whose configured index name
129
+ // is non-null (the extractor returns null for any other mode).
130
+ const replicaIdentityIndexToTable = new Map<string, Table>();
131
+ for (const table of Object.values(branchTables)) {
132
+ if (table.replica_identity !== "i" || !table.replica_identity_index) {
133
+ continue;
134
+ }
135
+ const indexId = stableId.index(
136
+ table.schema,
137
+ table.name,
138
+ table.replica_identity_index,
139
+ );
140
+ replicaIdentityIndexToTable.set(indexId, table);
141
+ }
142
+ if (replicaIdentityIndexToTable.size === 0) return changes;
143
+
144
+ // Find the indexes that are both dropped AND created in this plan. A pure
145
+ // drop or a pure create is handled by `diffTables()` directly (the table's
146
+ // replica_identity / replica_identity_index fields will have changed). The
147
+ // hole is specifically the drop+create pair that recreates the same name.
148
+ const droppedIndexIds = new Set<string>();
149
+ const createdIndexIds = new Set<string>();
150
+ for (const change of changes) {
151
+ if (change instanceof DropIndex) {
152
+ droppedIndexIds.add(change.index.stableId);
153
+ } else if (change instanceof CreateIndex) {
154
+ createdIndexIds.add(change.index.stableId);
155
+ }
156
+ }
157
+ const replacedIndexIds = new Set<string>();
158
+ for (const id of droppedIndexIds) {
159
+ if (createdIndexIds.has(id) && replicaIdentityIndexToTable.has(id)) {
160
+ replacedIndexIds.add(id);
161
+ }
162
+ }
163
+ if (replacedIndexIds.size === 0) return changes;
164
+
165
+ // Skip tables for which `diffTables()` already emitted a replica-identity
166
+ // setter — re-emitting would produce a redundant ALTER TABLE (harmless on
167
+ // apply, but noisy in plan output).
168
+ const tablesWithExistingReplicaIdentitySetter = new Set<string>();
169
+ for (const change of changes) {
170
+ if (change instanceof AlterTableSetReplicaIdentity) {
171
+ tablesWithExistingReplicaIdentitySetter.add(change.table.stableId);
172
+ }
173
+ }
174
+
175
+ // Insert one `AlterTableSetReplicaIdentity` per replaced index, immediately
176
+ // after the matching `CreateIndex`. The change's `requires` already names
177
+ // both the table and the recreated index, so the topo sort orders it
178
+ // correctly relative to the surrounding DDL.
179
+ const result: Change[] = [];
180
+ for (const change of changes) {
181
+ result.push(change);
182
+ if (
183
+ !(change instanceof CreateIndex) ||
184
+ !replacedIndexIds.has(change.index.stableId)
185
+ ) {
186
+ continue;
187
+ }
188
+ const table = replicaIdentityIndexToTable.get(change.index.stableId);
189
+ if (!table) continue;
190
+ if (tablesWithExistingReplicaIdentitySetter.has(table.stableId)) continue;
191
+
192
+ result.push(
193
+ new AlterTableSetReplicaIdentity({
194
+ table,
195
+ mode: "i",
196
+ indexName: table.replica_identity_index,
197
+ }),
198
+ );
199
+ // Mark as emitted so a second replaced index on the same table — if that
200
+ // ever arises — doesn't double-emit.
201
+ tablesWithExistingReplicaIdentitySetter.add(table.stableId);
202
+ }
203
+
204
+ return result;
205
+ }
206
+
207
+ /**
208
+ * Apply structural rewrites to the change list that are only obvious once
209
+ * every object diff has been collected. This pass does NOT prevent dependency
210
+ * cycles — that responsibility now lives in the sort phase, where
211
+ * `sortPhaseChanges` invokes `tryBreakCycleByChangeInjection` lazily on cycles
212
+ * that edge filtering can't break (FK SCC of dropped tables,
213
+ * AlterPublicationDropTables ↔ AlterTableDropColumn, …).
214
+ *
215
+ * Concretely, this pass:
216
+ *
217
+ * - Prunes `AlterTableDropColumn(T.*)` / `AlterTableDropConstraint(T.*)`
218
+ * changes that are made redundant by an expansion-emitted
219
+ * `DropTable(T) + CreateTable(T)` pair. Without this, the apply phase
220
+ * would try to drop a column that no longer exists in the freshly
221
+ * recreated table.
222
+ * - Dedupes duplicate `AlterTableAddConstraint` /
223
+ * `AlterTableValidateConstraint` / `CreateCommentOnConstraint` changes
224
+ * produced when `diffTables()` and `expandReplaceDependencies()` both
225
+ * emit the same constraint operation for a replaced table. Last write
226
+ * wins so the expansion's emission survives.
227
+ * - Re-emits `ALTER TABLE ... REPLICA IDENTITY USING INDEX <idx>` after any
228
+ * `DropIndex(idx) + CreateIndex(idx)` pair where `idx` is the replica
229
+ * identity index of a branch table — Postgres silently clears the marker
230
+ * when the underlying index is dropped, and `CREATE INDEX` cannot restore
231
+ * it.
232
+ *
233
+ * Object-local PostgreSQL semantics (for example owned-sequence cascades)
234
+ * stay in the corresponding `diff*` function instead of this pass.
235
+ */
236
+ export function normalizePostDiffChanges({
237
+ changes,
238
+ replacedTableIds = new Set<string>(),
239
+ branchTables = {},
240
+ }: {
241
+ changes: Change[];
242
+ replacedTableIds?: ReadonlySet<string>;
243
+ branchTables?: Record<string, Table>;
244
+ }): Change[] {
245
+ const restoredChanges = restoreReplicaIdentityAfterIndexReplace(
246
+ changes,
247
+ branchTables,
248
+ );
249
+
250
+ const dedupedChanges = dropReplacedTableDuplicateConstraintChanges(
251
+ restoredChanges,
252
+ replacedTableIds,
253
+ );
254
+
255
+ if (replacedTableIds.size === 0) return dedupedChanges;
256
+
257
+ return dedupedChanges.filter(
258
+ (change) => !isSupersededByTableReplacement(change, replacedTableIds),
259
+ );
260
+ }
@@ -284,7 +284,7 @@ function tryBreakPublicationColumnCycle(
284
284
 
285
285
  // Verify the table is NOT itself being dropped. If `DropTable(T)` is in
286
286
  // the same phase, the existing structural rewrites in
287
- // `post-diff-cycle-breaking.ts` (replace-expansion superseded filter)
287
+ // `post-diff-normalization.ts` (replace-expansion superseded filter)
288
288
  // already prune the redundant `AlterTableDropColumn`, so we should not
289
289
  // see this combination here. Be defensive and bail anyway — flipping
290
290
  // `omitTableRequirement` when T is being dropped would let the column