@supabase/pg-delta 1.0.0-alpha.13 → 1.0.0-alpha.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37) hide show
  1. package/README.md +7 -1
  2. package/dist/core/catalog.diff.js +7 -1
  3. package/dist/core/connection-url.d.ts +32 -0
  4. package/dist/core/connection-url.js +77 -0
  5. package/dist/core/expand-replace-dependencies.d.ts +8 -2
  6. package/dist/core/expand-replace-dependencies.js +24 -10
  7. package/dist/core/integrations/supabase.js +1 -0
  8. package/dist/core/objects/procedure/procedure.diff.js +8 -0
  9. package/dist/core/objects/sequence/sequence.diff.js +14 -6
  10. package/dist/core/objects/table/changes/table.alter.js +4 -1
  11. package/dist/core/objects/table/changes/table.drop.d.ts +12 -0
  12. package/dist/core/objects/table/changes/table.drop.js +20 -3
  13. package/dist/core/objects/table/table.diff.js +7 -2
  14. package/dist/core/post-diff-cycle-breaking.d.ts +22 -0
  15. package/dist/core/post-diff-cycle-breaking.js +143 -0
  16. package/dist/core/postgres-config.d.ts +27 -0
  17. package/dist/core/postgres-config.js +99 -7
  18. package/package.json +2 -1
  19. package/src/core/catalog.diff.ts +7 -1
  20. package/src/core/connection-url.test.ts +142 -0
  21. package/src/core/connection-url.ts +82 -0
  22. package/src/core/expand-replace-dependencies.test.ts +247 -8
  23. package/src/core/expand-replace-dependencies.ts +33 -5
  24. package/src/core/integrations/supabase.ts +1 -0
  25. package/src/core/objects/procedure/procedure.diff.test.ts +25 -0
  26. package/src/core/objects/procedure/procedure.diff.ts +12 -0
  27. package/src/core/objects/sequence/sequence.diff.test.ts +110 -8
  28. package/src/core/objects/sequence/sequence.diff.ts +16 -6
  29. package/src/core/objects/table/changes/table.alter.test.ts +14 -0
  30. package/src/core/objects/table/changes/table.alter.ts +4 -1
  31. package/src/core/objects/table/changes/table.drop.ts +27 -4
  32. package/src/core/objects/table/table.diff.test.ts +55 -0
  33. package/src/core/objects/table/table.diff.ts +10 -2
  34. package/src/core/post-diff-cycle-breaking.test.ts +317 -0
  35. package/src/core/post-diff-cycle-breaking.ts +236 -0
  36. package/src/core/postgres-config.test.ts +241 -0
  37. package/src/core/postgres-config.ts +127 -16
package/README.md CHANGED
@@ -181,7 +181,13 @@ See [Integrations Documentation](./docs/integrations.md) for complete details.
181
181
 
182
182
  ## Contributing
183
183
 
184
- Contributions welcome! Feel free to submit issues and pull requests.
184
+ Please follow the repository-level guide in [../../CONTRIBUTING.md](../../CONTRIBUTING.md).
185
+
186
+ In particular:
187
+
188
+ - Open an issue first.
189
+ - Wait for maintainer triage via one of `✨ Feature`, `🐛 Bug`, `📘 Docs`, or `🛠️ Chore` before opening a pull request.
190
+ - Use [../../ISSUES.md](../../ISSUES.md) when reporting `pg-delta` bugs so maintainers have what they need to reproduce them.
185
191
 
186
192
  ## License
187
193
 
@@ -1,5 +1,6 @@
1
1
  import debug from "debug";
2
2
  import { expandReplaceDependencies } from "./expand-replace-dependencies.js";
3
+ import { normalizePostDiffCycles } from "./post-diff-cycle-breaking.js";
3
4
  const debugCatalog = debug("pg-delta:catalog");
4
5
  import { diffAggregates } from "./objects/aggregate/aggregate.diff.js";
5
6
  import { DefaultPrivilegeState } from "./objects/base.default-privileges.js";
@@ -153,11 +154,16 @@ export function diffCatalogs(main, branch, options) {
153
154
  }
154
155
  return true;
155
156
  });
156
- filteredChanges = expandReplaceDependencies({
157
+ const expandedDependencies = expandReplaceDependencies({
157
158
  changes: filteredChanges,
158
159
  mainCatalog: main,
159
160
  branchCatalog: branch,
160
161
  });
162
+ filteredChanges = normalizePostDiffCycles({
163
+ changes: expandedDependencies.changes,
164
+ mainCatalog: main,
165
+ replacedTableIds: expandedDependencies.replacedTableIds,
166
+ });
161
167
  debugCatalog("changes catalog diff: %O", stringifyWithBigInt(filteredChanges, 2));
162
168
  return filteredChanges;
163
169
  }
@@ -0,0 +1,32 @@
1
+ /**
2
+ * Connection URL normalization for pg-delta.
3
+ *
4
+ * Auto-normalizes percent-encoded IPv6 hosts in PostgreSQL connection URLs.
5
+ * A URL like `postgresql://user:pass@2406%3Ada18%3A...%3Ab3c9:5432/db`
6
+ * becomes `postgresql://user:pass@[2406:da18:...:b3c9]:5432/db` before it
7
+ * reaches `pg-connection-string` / `pg.Pool`, so DNS resolution sees the
8
+ * address in its canonical bracketed form.
9
+ *
10
+ * Non-IPv6 hosts (IPv4, DNS names, already-bracketed IPv6, partial fragments
11
+ * that just happen to contain `%3A`) are returned verbatim.
12
+ */
13
+ /**
14
+ * Return true if `value` is a valid IPv6 literal in any canonical form:
15
+ * full 8-group, `::` compression, or IPv4-mapped (`::ffff:1.2.3.4`).
16
+ * RFC 4007 zone identifiers (`fe80::1%eth0`) are accepted.
17
+ */
18
+ export declare function isIPv6(value: string): boolean;
19
+ /**
20
+ * Normalize a PostgreSQL connection URL so IPv6 hosts reach pg in the
21
+ * canonical bracketed form.
22
+ *
23
+ * If the URL's hostname contains a percent-encoded colon AND the decoded
24
+ * hostname is a valid IPv6 literal, the hostname is decoded and wrapped in
25
+ * `[...]`. All other fields (scheme, userinfo, port, path, query, fragment)
26
+ * are preserved byte-for-byte from the input.
27
+ *
28
+ * Any URL whose decoded hostname does not validate as IPv6 is returned
29
+ * verbatim, so a malformed input will surface its usual downstream error
30
+ * instead of being silently rewritten.
31
+ */
32
+ export declare function normalizeConnectionUrl(url: string): string;
@@ -0,0 +1,77 @@
1
+ /**
2
+ * Connection URL normalization for pg-delta.
3
+ *
4
+ * Auto-normalizes percent-encoded IPv6 hosts in PostgreSQL connection URLs.
5
+ * A URL like `postgresql://user:pass@2406%3Ada18%3A...%3Ab3c9:5432/db`
6
+ * becomes `postgresql://user:pass@[2406:da18:...:b3c9]:5432/db` before it
7
+ * reaches `pg-connection-string` / `pg.Pool`, so DNS resolution sees the
8
+ * address in its canonical bracketed form.
9
+ *
10
+ * Non-IPv6 hosts (IPv4, DNS names, already-bracketed IPv6, partial fragments
11
+ * that just happen to contain `%3A`) are returned verbatim.
12
+ */
13
+ // IPv6 detection regex vendored from ip-regex (Sindre Sorhus, MIT).
14
+ // https://github.com/sindresorhus/ip-regex
15
+ const v4 = "(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]\\d|\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]\\d|\\d)){3}";
16
+ const v6seg = "[a-fA-F\\d]{1,4}";
17
+ const v6 = `
18
+ (?:
19
+ (?:${v6seg}:){7}(?:${v6seg}|:)|
20
+ (?:${v6seg}:){6}(?:${v4}|:${v6seg}|:)|
21
+ (?:${v6seg}:){5}(?::${v4}|(?::${v6seg}){1,2}|:)|
22
+ (?:${v6seg}:){4}(?:(?::${v6seg}){0,1}:${v4}|(?::${v6seg}){1,3}|:)|
23
+ (?:${v6seg}:){3}(?:(?::${v6seg}){0,2}:${v4}|(?::${v6seg}){1,4}|:)|
24
+ (?:${v6seg}:){2}(?:(?::${v6seg}){0,3}:${v4}|(?::${v6seg}){1,5}|:)|
25
+ (?:${v6seg}:){1}(?:(?::${v6seg}){0,4}:${v4}|(?::${v6seg}){1,6}|:)|
26
+ (?::(?:(?::${v6seg}){0,5}:${v4}|(?::${v6seg}){1,7}|:))
27
+ )(?:%[0-9a-zA-Z]{1,})?
28
+ `
29
+ .replace(/\s*\/\/.*$/gm, "")
30
+ .replace(/\n/g, "")
31
+ .trim();
32
+ const V6_EXACT = new RegExp(`^${v6}$`);
33
+ /**
34
+ * Return true if `value` is a valid IPv6 literal in any canonical form:
35
+ * full 8-group, `::` compression, or IPv4-mapped (`::ffff:1.2.3.4`).
36
+ * RFC 4007 zone identifiers (`fe80::1%eth0`) are accepted.
37
+ */
38
+ export function isIPv6(value) {
39
+ return typeof value === "string" && V6_EXACT.test(value);
40
+ }
41
+ /**
42
+ * Normalize a PostgreSQL connection URL so IPv6 hosts reach pg in the
43
+ * canonical bracketed form.
44
+ *
45
+ * If the URL's hostname contains a percent-encoded colon AND the decoded
46
+ * hostname is a valid IPv6 literal, the hostname is decoded and wrapped in
47
+ * `[...]`. All other fields (scheme, userinfo, port, path, query, fragment)
48
+ * are preserved byte-for-byte from the input.
49
+ *
50
+ * Any URL whose decoded hostname does not validate as IPv6 is returned
51
+ * verbatim, so a malformed input will surface its usual downstream error
52
+ * instead of being silently rewritten.
53
+ */
54
+ export function normalizeConnectionUrl(url) {
55
+ const urlObj = new URL(url);
56
+ // Cheap pre-filter: only look closer if the hostname contains a
57
+ // percent-encoded colon. Anything else is left entirely untouched.
58
+ if (!/%3[aA]/.test(urlObj.hostname))
59
+ return url;
60
+ const decodedHost = decodeURIComponent(urlObj.hostname);
61
+ // Authoritative validation: only normalize when the decoded string is a
62
+ // real IPv6 literal. Rejects partial fragments, random hostnames that
63
+ // happen to contain `%3A`, and any malformed input.
64
+ if (!isIPv6(decodedHost))
65
+ return url;
66
+ // Preserve username/password/port/path/search/hash exactly as they appear
67
+ // in the WHATWG URL model (these are returned already percent-encoded).
68
+ const scheme = `${urlObj.protocol}//`;
69
+ const auth = urlObj.username
70
+ ? urlObj.password
71
+ ? `${urlObj.username}:${urlObj.password}@`
72
+ : `${urlObj.username}@`
73
+ : "";
74
+ const port = urlObj.port ? `:${urlObj.port}` : "";
75
+ const tail = `${urlObj.pathname}${urlObj.search}${urlObj.hash}`;
76
+ return `${scheme}${auth}[${decodedHost}]${port}${tail}`;
77
+ }
@@ -5,10 +5,16 @@ import type { Change } from "./change.types.ts";
5
5
  * replaced so that destructive drops succeed. Uses dependency edges from pg_depend
6
6
  * (already captured in Catalog.depends) plus change metadata (creates/drops/requires).
7
7
  *
8
- * New changes are appended; ordering is handled later by the sorter.
8
+ * New changes are appended; ordering and any multi-statement cycle normalization
9
+ * are handled later by post-diff helpers and the sorter.
9
10
  */
11
+ interface ExpandReplaceDependenciesResult {
12
+ changes: Change[];
13
+ replacedTableIds: ReadonlySet<string>;
14
+ }
10
15
  export declare function expandReplaceDependencies({ changes, mainCatalog, branchCatalog, }: {
11
16
  changes: Change[];
12
17
  mainCatalog: Catalog;
13
18
  branchCatalog: Catalog;
14
- }): Change[];
19
+ }): ExpandReplaceDependenciesResult;
20
+ export {};
@@ -17,13 +17,6 @@ import { DropRange } from "./objects/type/range/changes/range.drop.js";
17
17
  import { stableId } from "./objects/utils.js";
18
18
  import { CreateView } from "./objects/view/changes/view.create.js";
19
19
  import { DropView } from "./objects/view/changes/view.drop.js";
20
- /**
21
- * For objects we are replacing (drop + create), ensure that any dependents are also
22
- * replaced so that destructive drops succeed. Uses dependency edges from pg_depend
23
- * (already captured in Catalog.depends) plus change metadata (creates/drops/requires).
24
- *
25
- * New changes are appended; ordering is handled later by the sorter.
26
- */
27
20
  export function expandReplaceDependencies({ changes, mainCatalog, branchCatalog, }) {
28
21
  const createdIds = new Set();
29
22
  const droppedIds = new Set();
@@ -40,7 +33,10 @@ export function expandReplaceDependencies({ changes, mainCatalog, branchCatalog,
40
33
  }
41
34
  }
42
35
  if (replaceRoots.size === 0) {
43
- return changes;
36
+ return {
37
+ changes,
38
+ replacedTableIds: new Set(),
39
+ };
44
40
  }
45
41
  // Build referenced -> dependents adjacency from main catalog dependencies.
46
42
  const dependentsByReferenced = new Map();
@@ -56,6 +52,12 @@ export function expandReplaceDependencies({ changes, mainCatalog, branchCatalog,
56
52
  const visitedTargets = new Set();
57
53
  const visitedRefs = new Set(replaceRoots);
58
54
  const queue = [...replaceRoots];
55
+ // Tables being replaced by an expansion-added DropTable+CreateTable pair.
56
+ // Any pre-existing targeted AlterTable*(T) object-scope change is superseded
57
+ // by the replacement and must be removed to avoid contradictions (e.g. an
58
+ // AlterTableDropColumn on a table that is about to be dropped) and the
59
+ // associated drop-phase cycle with the catalog constraint→column edge.
60
+ const tablesReplacedByExpansion = new Set();
59
61
  while (queue.length > 0) {
60
62
  const refId = queue.shift();
61
63
  const dependents = dependentsByReferenced.get(refId);
@@ -102,6 +104,12 @@ export function expandReplaceDependencies({ changes, mainCatalog, branchCatalog,
102
104
  if (!replacementChanges)
103
105
  continue;
104
106
  additions.push(...replacementChanges);
107
+ // If we added a DropTable(T) for an existing table, mark T so any
108
+ // pre-existing object-scope AlterTable*(T) changes get dropped below —
109
+ // the DropTable+CreateTable pair supersedes all structural alterations.
110
+ if (resolved.kind === "table" && addDrop) {
111
+ tablesReplacedByExpansion.add(targetId);
112
+ }
105
113
  // Track new creates/drops so we don't duplicate work for downstream dependents.
106
114
  for (const change of replacementChanges) {
107
115
  for (const id of change.creates ?? [])
@@ -112,9 +120,15 @@ export function expandReplaceDependencies({ changes, mainCatalog, branchCatalog,
112
120
  }
113
121
  }
114
122
  if (additions.length === 0) {
115
- return changes;
123
+ return {
124
+ changes,
125
+ replacedTableIds: tablesReplacedByExpansion,
126
+ };
116
127
  }
117
- return [...changes, ...additions];
128
+ return {
129
+ changes: [...changes, ...additions],
130
+ replacedTableIds: tablesReplacedByExpansion,
131
+ };
118
132
  }
119
133
  function isOwnedSequenceColumnDependency(referencedId, dependentId, mainCatalog, branchCatalog) {
120
134
  // When a sequence replace root is still OWNED BY the same column, the
@@ -13,6 +13,7 @@ const SUPABASE_SYSTEM_SCHEMAS = [
13
13
  "_supavisor",
14
14
  "auth",
15
15
  "cron",
16
+ "etl",
16
17
  "extensions",
17
18
  "graphql",
18
19
  "graphql_public",
@@ -99,6 +99,14 @@ export function diffProcedures(ctx, main, branch) {
99
99
  if (nonAlterablePropsChanged) {
100
100
  // Replace the entire procedure
101
101
  changes.push(new CreateProcedure({ procedure: branchProcedure, orReplace: true }));
102
+ if (mainProcedure.comment !== branchProcedure.comment) {
103
+ if (branchProcedure.comment === null) {
104
+ changes.push(new DropCommentOnProcedure({ procedure: mainProcedure }));
105
+ }
106
+ else {
107
+ changes.push(new CreateCommentOnProcedure({ procedure: branchProcedure }));
108
+ }
109
+ }
102
110
  }
103
111
  else {
104
112
  // Only alterable properties changed - check each one
@@ -59,16 +59,24 @@ export function diffSequences(ctx, main, branch, branchTables = {}) {
59
59
  }
60
60
  for (const sequenceId of dropped) {
61
61
  const sequence = main[sequenceId];
62
- // Skip generating DROP SEQUENCE if the sequence is owned by a table that's being dropped.
63
- // PostgreSQL automatically drops sequences owned by tables when the table is dropped,
64
- // so generating DROP SEQUENCE would cause an error (sequence doesn't exist).
62
+ // Skip generating DROP SEQUENCE if the sequence is owned by a table/column that's being dropped.
63
+ // PostgreSQL automatically cascades owned sequences when the owning table OR the owning
64
+ // column is dropped (via OWNED BY). Emitting DROP SEQUENCE in those cases would either
65
+ // fail at apply time (sequence already gone) or — in the column-drop case — create an
66
+ // unbreakable DropSequence ↔ AlterTableDropColumn cycle in the drop-phase sort graph.
65
67
  if (sequence.owned_by_schema &&
66
68
  sequence.owned_by_table &&
67
69
  sequence.owned_by_column) {
68
70
  const ownedByTableId = `table:${sequence.owned_by_schema}.${sequence.owned_by_table}`;
69
- // If the owning table doesn't exist in branch catalog, it's being dropped
70
- // and will auto-drop this sequence, so skip generating DROP SEQUENCE
71
- if (!(ownedByTableId in branchTables)) {
71
+ const ownedByTable = branchTables[ownedByTableId];
72
+ // Owning table is dropped → PG auto-drops the owned sequence.
73
+ if (!ownedByTable) {
74
+ continue;
75
+ }
76
+ // Owning column is dropped (table survives) → PG still auto-drops the owned
77
+ // sequence as part of the column drop, so we must not emit DROP SEQUENCE.
78
+ const ownedByColumnExists = ownedByTable.columns?.some((col) => col.name === sequence.owned_by_column);
79
+ if (!ownedByColumnExists) {
72
80
  continue;
73
81
  }
74
82
  }
@@ -462,13 +462,16 @@ export class AlterTableAlterColumnSetDefault extends AlterTableChange {
462
462
  }
463
463
  serialize(_options) {
464
464
  const set = this.column.is_generated ? "SET EXPRESSION AS" : "SET DEFAULT";
465
+ const value = this.column.is_generated
466
+ ? `(${this.column.default ?? "NULL"})`
467
+ : (this.column.default ?? "NULL");
465
468
  return [
466
469
  "ALTER TABLE",
467
470
  `${this.table.schema}.${this.table.name}`,
468
471
  "ALTER COLUMN",
469
472
  this.column.name,
470
473
  set,
471
- this.column.default ?? "NULL",
474
+ value,
472
475
  ].join(" ");
473
476
  }
474
477
  }
@@ -14,9 +14,21 @@ import { DropTableChange } from "./table.base.ts";
14
14
  export declare class DropTable extends DropTableChange {
15
15
  readonly table: Table;
16
16
  readonly scope: "object";
17
+ /**
18
+ * Names of constraints on this table that are dropped explicitly by a
19
+ * separate `AlterTableDropConstraint` change. Those constraints must not be
20
+ * claimed by `DropTable.drops` / `.requires`, otherwise catalog edges tied
21
+ * to the constraint stableId will attach to this DropTable node instead of
22
+ * the dedicated AlterTableDropConstraint node. When two tables with mutual
23
+ * FK references are dropped in the same phase, that misattribution
24
+ * produces an unbreakable cycle between the two DropTable changes.
25
+ */
26
+ readonly externallyDroppedConstraints: ReadonlySet<string>;
17
27
  constructor(props: {
18
28
  table: Table;
29
+ externallyDroppedConstraints?: ReadonlySet<string>;
19
30
  });
31
+ private get claimedConstraints();
20
32
  get drops(): (`column:${string}.${string}.${string}` | `constraint:${string}.${string}.${string}` | `table:${string}`)[];
21
33
  get requires(): (`column:${string}.${string}.${string}` | `constraint:${string}.${string}.${string}` | `table:${string}`)[];
22
34
  serialize(_options?: SerializeOptions): string;
@@ -13,17 +13,34 @@ import { DropTableChange } from "./table.base.js";
13
13
  export class DropTable extends DropTableChange {
14
14
  table;
15
15
  scope = "object";
16
+ /**
17
+ * Names of constraints on this table that are dropped explicitly by a
18
+ * separate `AlterTableDropConstraint` change. Those constraints must not be
19
+ * claimed by `DropTable.drops` / `.requires`, otherwise catalog edges tied
20
+ * to the constraint stableId will attach to this DropTable node instead of
21
+ * the dedicated AlterTableDropConstraint node. When two tables with mutual
22
+ * FK references are dropped in the same phase, that misattribution
23
+ * produces an unbreakable cycle between the two DropTable changes.
24
+ */
25
+ externallyDroppedConstraints;
16
26
  constructor(props) {
17
27
  super();
18
28
  this.table = props.table;
29
+ this.externallyDroppedConstraints =
30
+ props.externallyDroppedConstraints ?? new Set();
31
+ }
32
+ get claimedConstraints() {
33
+ return this.table.constraints.filter((constraint) => !this.externallyDroppedConstraints.has(constraint.name));
19
34
  }
20
35
  get drops() {
21
36
  return [
22
37
  this.table.stableId,
23
38
  ...this.table.columns.map((column) => stableId.column(this.table.schema, this.table.name, column.name)),
24
39
  // Include constraint stableIds so FK relationships that only exist at the
25
- // constraint level still affect whole-table drop ordering.
26
- ...this.table.constraints.map((constraint) => stableId.constraint(this.table.schema, this.table.name, constraint.name)),
40
+ // constraint level still affect whole-table drop ordering. Skip any
41
+ // constraint that the diff layer is dropping via a dedicated
42
+ // AlterTableDropConstraint change — that node owns the stableId.
43
+ ...this.claimedConstraints.map((constraint) => stableId.constraint(this.table.schema, this.table.name, constraint.name)),
27
44
  ];
28
45
  }
29
46
  get requires() {
@@ -32,7 +49,7 @@ export class DropTable extends DropTableChange {
32
49
  ...this.table.columns.map((col) => stableId.column(this.table.schema, this.table.name, col.name)),
33
50
  // Mirror the dropped constraint ids in requires so drop-phase graph
34
51
  // consumers can connect catalog FK edges back to this table drop.
35
- ...this.table.constraints.map((constraint) => stableId.constraint(this.table.schema, this.table.name, constraint.name)),
52
+ ...this.claimedConstraints.map((constraint) => stableId.constraint(this.table.schema, this.table.name, constraint.name)),
36
53
  ];
37
54
  }
38
55
  serialize(_options) {
@@ -486,9 +486,14 @@ export function diffTables(ctx, main, branch) {
486
486
  // Set new default value
487
487
  const isGeneratedColumn = branchCol.is_generated;
488
488
  const isPostgresLowerThan17 = ctx.version < 170000;
489
- if (isGeneratedColumn && isPostgresLowerThan17) {
489
+ const generatedStatusChanged = mainCol.is_generated !== branchCol.is_generated;
490
+ if (isGeneratedColumn &&
491
+ (isPostgresLowerThan17 || generatedStatusChanged)) {
490
492
  // For generated columns in < PostgreSQL 17, we need to drop and recreate
491
- // instead of using SET EXPRESSION AS for computed columns
493
+ // instead of using SET EXPRESSION AS for computed columns. We also
494
+ // need to recreate the column when switching between regular and
495
+ // generated states because SET EXPRESSION only applies to existing
496
+ // generated columns.
492
497
  // cf: https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=5d06e99a3
493
498
  // cf: https://www.postgresql.org/docs/release/17.0/
494
499
  // > Allow ALTER TABLE to change a column's generation expression
@@ -0,0 +1,22 @@
1
+ import type { Catalog } from "./catalog.model.ts";
2
+ import type { Change } from "./change.types.ts";
3
+ /**
4
+ * Normalize change-list cycles that only become apparent after all object
5
+ * diffs have been collected.
6
+ *
7
+ * This pass intentionally handles whole-plan interactions only:
8
+ * - If replace expansion added `DropTable(T)+CreateTable(T)`, targeted
9
+ * `AlterTableDropColumn(T.*)` / `AlterTableDropConstraint(T.*)` changes are
10
+ * redundant and create an unbreakable drop-phase cycle, so we elide them.
11
+ * - If two dropped tables reference each other via FK, we insert dedicated
12
+ * `AlterTableDropConstraint` changes and teach the paired `DropTable`
13
+ * changes not to claim those FK stable IDs.
14
+ *
15
+ * Object-local PostgreSQL semantics (for example owned-sequence cascades) stay
16
+ * in the corresponding `diff*` function instead of this pass.
17
+ */
18
+ export declare function normalizePostDiffCycles({ changes, mainCatalog, replacedTableIds, }: {
19
+ changes: Change[];
20
+ mainCatalog: Catalog;
21
+ replacedTableIds?: ReadonlySet<string>;
22
+ }): Change[];
@@ -0,0 +1,143 @@
1
+ import { AlterTableDropColumn, AlterTableDropConstraint, } from "./objects/table/changes/table.alter.js";
2
+ import { DropTable } from "./objects/table/changes/table.drop.js";
3
+ import { stableId } from "./objects/utils.js";
4
+ function constraintStableId(table, constraintName) {
5
+ return stableId.constraint(table.schema, table.name, constraintName);
6
+ }
7
+ /**
8
+ * Yield FK constraints on `table` whose referenced table is also dropped in the
9
+ * final plan. Self-references are left alone because the sort phase already
10
+ * handles the resulting self-loop correctly.
11
+ */
12
+ function* iterCrossDropFkConstraints(table, droppedSet) {
13
+ for (const constraint of table.constraints) {
14
+ if (constraint.constraint_type !== "f")
15
+ continue;
16
+ if (constraint.is_partition_clone)
17
+ continue;
18
+ if (!constraint.foreign_key_schema || !constraint.foreign_key_table) {
19
+ continue;
20
+ }
21
+ const referencedId = stableId.table(constraint.foreign_key_schema, constraint.foreign_key_table);
22
+ if (referencedId === table.stableId)
23
+ continue;
24
+ if (!droppedSet.has(referencedId))
25
+ continue;
26
+ yield { constraint, referencedId };
27
+ }
28
+ }
29
+ function isSupersededByTableReplacement(change, replacedTableIds) {
30
+ if (!(change instanceof AlterTableDropColumn) &&
31
+ !(change instanceof AlterTableDropConstraint)) {
32
+ return false;
33
+ }
34
+ return replacedTableIds.has(change.table.stableId);
35
+ }
36
+ function collectExplicitConstraintDropIds(changes) {
37
+ const explicitConstraintDropIds = new Set();
38
+ for (const change of changes) {
39
+ if (!(change instanceof AlterTableDropConstraint))
40
+ continue;
41
+ explicitConstraintDropIds.add(constraintStableId(change.table, change.constraint.name));
42
+ }
43
+ return explicitConstraintDropIds;
44
+ }
45
+ function hasSameEntries(left, right) {
46
+ if (left.size !== right.size)
47
+ return false;
48
+ for (const value of left) {
49
+ if (!right.has(value))
50
+ return false;
51
+ }
52
+ return true;
53
+ }
54
+ /**
55
+ * Normalize change-list cycles that only become apparent after all object
56
+ * diffs have been collected.
57
+ *
58
+ * This pass intentionally handles whole-plan interactions only:
59
+ * - If replace expansion added `DropTable(T)+CreateTable(T)`, targeted
60
+ * `AlterTableDropColumn(T.*)` / `AlterTableDropConstraint(T.*)` changes are
61
+ * redundant and create an unbreakable drop-phase cycle, so we elide them.
62
+ * - If two dropped tables reference each other via FK, we insert dedicated
63
+ * `AlterTableDropConstraint` changes and teach the paired `DropTable`
64
+ * changes not to claim those FK stable IDs.
65
+ *
66
+ * Object-local PostgreSQL semantics (for example owned-sequence cascades) stay
67
+ * in the corresponding `diff*` function instead of this pass.
68
+ */
69
+ export function normalizePostDiffCycles({ changes, mainCatalog, replacedTableIds = new Set(), }) {
70
+ const structurallyNormalizedChanges = replacedTableIds.size === 0
71
+ ? changes
72
+ : changes.filter((change) => !isSupersededByTableReplacement(change, replacedTableIds));
73
+ const dropTableChanges = structurallyNormalizedChanges.filter((change) => change instanceof DropTable);
74
+ if (dropTableChanges.length < 2) {
75
+ return structurallyNormalizedChanges;
76
+ }
77
+ const droppedSet = new Set(dropTableChanges.map((change) => change.table.stableId));
78
+ const droppedFkTargets = new Map();
79
+ for (const dropTableChange of dropTableChanges) {
80
+ const mainTable = mainCatalog.tables[dropTableChange.table.stableId] ??
81
+ dropTableChange.table;
82
+ const targets = new Set();
83
+ for (const { referencedId } of iterCrossDropFkConstraints(mainTable, droppedSet)) {
84
+ targets.add(referencedId);
85
+ }
86
+ droppedFkTargets.set(mainTable.stableId, targets);
87
+ }
88
+ const explicitConstraintDropIds = collectExplicitConstraintDropIds(structurallyNormalizedChanges);
89
+ const injectedConstraintDropsByTableId = new Map();
90
+ const externallyDroppedConstraintsByTableId = new Map();
91
+ let didMutate = structurallyNormalizedChanges !== changes;
92
+ for (const dropTableChange of dropTableChanges) {
93
+ const mainTable = mainCatalog.tables[dropTableChange.table.stableId] ??
94
+ dropTableChange.table;
95
+ const externallyDroppedConstraints = new Set(dropTableChange.externallyDroppedConstraints);
96
+ for (const { constraint, referencedId } of iterCrossDropFkConstraints(mainTable, droppedSet)) {
97
+ const isMutual = droppedFkTargets.get(referencedId)?.has(mainTable.stableId) === true;
98
+ if (!isMutual)
99
+ continue;
100
+ const droppedConstraintStableId = constraintStableId(mainTable, constraint.name);
101
+ externallyDroppedConstraints.add(constraint.name);
102
+ if (!explicitConstraintDropIds.has(droppedConstraintStableId)) {
103
+ const injectedDrop = new AlterTableDropConstraint({
104
+ table: mainTable,
105
+ constraint,
106
+ });
107
+ const existingDrops = injectedConstraintDropsByTableId.get(mainTable.stableId) ?? [];
108
+ existingDrops.push(injectedDrop);
109
+ injectedConstraintDropsByTableId.set(mainTable.stableId, existingDrops);
110
+ explicitConstraintDropIds.add(droppedConstraintStableId);
111
+ didMutate = true;
112
+ }
113
+ }
114
+ if (!hasSameEntries(dropTableChange.externallyDroppedConstraints, externallyDroppedConstraints)) {
115
+ externallyDroppedConstraintsByTableId.set(mainTable.stableId, externallyDroppedConstraints);
116
+ didMutate = true;
117
+ }
118
+ }
119
+ if (!didMutate) {
120
+ return changes;
121
+ }
122
+ const normalizedChanges = [];
123
+ for (const change of structurallyNormalizedChanges) {
124
+ if (!(change instanceof DropTable)) {
125
+ normalizedChanges.push(change);
126
+ continue;
127
+ }
128
+ const injectedConstraintDrops = injectedConstraintDropsByTableId.get(change.table.stableId) ?? [];
129
+ if (injectedConstraintDrops.length > 0) {
130
+ normalizedChanges.push(...injectedConstraintDrops);
131
+ }
132
+ const externallyDroppedConstraints = externallyDroppedConstraintsByTableId.get(change.table.stableId);
133
+ if (!externallyDroppedConstraints) {
134
+ normalizedChanges.push(change);
135
+ continue;
136
+ }
137
+ normalizedChanges.push(new DropTable({
138
+ table: change.table,
139
+ externallyDroppedConstraints,
140
+ }));
141
+ }
142
+ return normalizedChanges;
143
+ }
@@ -3,6 +3,33 @@
3
3
  */
4
4
  import type { ClientBase, PoolClient, PoolConfig } from "pg";
5
5
  import { Pool } from "pg";
6
+ /**
7
+ * Return true when `err` represents a transient connect failure that makes
8
+ * sense to retry with backoff (e.g. refused connections, DNS blips, our own
9
+ * eager-connect timeout wrapper). Returns false for permanent failures such
10
+ * as authentication errors, TLS negotiation errors, and `ENOTFOUND`.
11
+ *
12
+ * Unknown errors are treated as retryable on purpose: transient-by-default
13
+ * is safer here because a duplicated retry is strictly cheaper than a spurious
14
+ * hard failure during catalog extraction.
15
+ */
16
+ export declare function isRetryableConnectError(err: unknown): boolean;
17
+ /**
18
+ * Retry an async `connect` operation with bounded exponential backoff.
19
+ * Stops immediately on a non-retryable error. On exhausted attempts, throws
20
+ * the last observed error.
21
+ *
22
+ * Exposed for testing — production call sites always go through
23
+ * {@link createManagedPool}.
24
+ */
25
+ export declare function connectWithRetry<T>(opts: {
26
+ connect: (attempt: number) => Promise<T>;
27
+ isRetryable?: (err: unknown) => boolean;
28
+ maxAttempts?: number;
29
+ baseBackoffMs?: number;
30
+ maxBackoffMs?: number;
31
+ sleep?: (ms: number) => Promise<void>;
32
+ }): Promise<T>;
6
33
  /**
7
34
  * Options for creating a Pool with event listeners.
8
35
  */