@supabase/pg-delta 1.0.0-alpha.14 → 1.0.0-alpha.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -181,7 +181,13 @@ See [Integrations Documentation](./docs/integrations.md) for complete details.
181
181
 
182
182
  ## Contributing
183
183
 
184
- Contributions welcome! Feel free to submit issues and pull requests.
184
+ Please follow the repository-level guide in [../../CONTRIBUTING.md](../../CONTRIBUTING.md).
185
+
186
+ In particular:
187
+
188
+ - Open an issue first.
189
+ - Wait for maintainer triage via one of `✨ Feature`, `🐛 Bug`, `📘 Docs`, or `🛠️ Chore` before opening a pull request.
190
+ - Use [../../ISSUES.md](../../ISSUES.md) when reporting `pg-delta` bugs so maintainers have what they need to reproduce them.
185
191
 
186
192
  ## License
187
193
 
@@ -1,5 +1,6 @@
1
1
  import debug from "debug";
2
2
  import { expandReplaceDependencies } from "./expand-replace-dependencies.js";
3
+ import { normalizePostDiffCycles } from "./post-diff-cycle-breaking.js";
3
4
  const debugCatalog = debug("pg-delta:catalog");
4
5
  import { diffAggregates } from "./objects/aggregate/aggregate.diff.js";
5
6
  import { DefaultPrivilegeState } from "./objects/base.default-privileges.js";
@@ -153,11 +154,16 @@ export function diffCatalogs(main, branch, options) {
153
154
  }
154
155
  return true;
155
156
  });
156
- filteredChanges = expandReplaceDependencies({
157
+ const expandedDependencies = expandReplaceDependencies({
157
158
  changes: filteredChanges,
158
159
  mainCatalog: main,
159
160
  branchCatalog: branch,
160
161
  });
162
+ filteredChanges = normalizePostDiffCycles({
163
+ changes: expandedDependencies.changes,
164
+ mainCatalog: main,
165
+ replacedTableIds: expandedDependencies.replacedTableIds,
166
+ });
161
167
  debugCatalog("changes catalog diff: %O", stringifyWithBigInt(filteredChanges, 2));
162
168
  return filteredChanges;
163
169
  }
@@ -5,10 +5,16 @@ import type { Change } from "./change.types.ts";
5
5
  * replaced so that destructive drops succeed. Uses dependency edges from pg_depend
6
6
  * (already captured in Catalog.depends) plus change metadata (creates/drops/requires).
7
7
  *
8
- * New changes are appended; ordering is handled later by the sorter.
8
+ * New changes are appended; ordering and any multi-statement cycle normalization
9
+ * are handled later by post-diff helpers and the sorter.
9
10
  */
11
+ interface ExpandReplaceDependenciesResult {
12
+ changes: Change[];
13
+ replacedTableIds: ReadonlySet<string>;
14
+ }
10
15
  export declare function expandReplaceDependencies({ changes, mainCatalog, branchCatalog, }: {
11
16
  changes: Change[];
12
17
  mainCatalog: Catalog;
13
18
  branchCatalog: Catalog;
14
- }): Change[];
19
+ }): ExpandReplaceDependenciesResult;
20
+ export {};
@@ -17,13 +17,6 @@ import { DropRange } from "./objects/type/range/changes/range.drop.js";
17
17
  import { stableId } from "./objects/utils.js";
18
18
  import { CreateView } from "./objects/view/changes/view.create.js";
19
19
  import { DropView } from "./objects/view/changes/view.drop.js";
20
- /**
21
- * For objects we are replacing (drop + create), ensure that any dependents are also
22
- * replaced so that destructive drops succeed. Uses dependency edges from pg_depend
23
- * (already captured in Catalog.depends) plus change metadata (creates/drops/requires).
24
- *
25
- * New changes are appended; ordering is handled later by the sorter.
26
- */
27
20
  export function expandReplaceDependencies({ changes, mainCatalog, branchCatalog, }) {
28
21
  const createdIds = new Set();
29
22
  const droppedIds = new Set();
@@ -40,7 +33,10 @@ export function expandReplaceDependencies({ changes, mainCatalog, branchCatalog,
40
33
  }
41
34
  }
42
35
  if (replaceRoots.size === 0) {
43
- return changes;
36
+ return {
37
+ changes,
38
+ replacedTableIds: new Set(),
39
+ };
44
40
  }
45
41
  // Build referenced -> dependents adjacency from main catalog dependencies.
46
42
  const dependentsByReferenced = new Map();
@@ -56,6 +52,12 @@ export function expandReplaceDependencies({ changes, mainCatalog, branchCatalog,
56
52
  const visitedTargets = new Set();
57
53
  const visitedRefs = new Set(replaceRoots);
58
54
  const queue = [...replaceRoots];
55
+ // Tables being replaced by an expansion-added DropTable+CreateTable pair.
56
+ // Any pre-existing targeted AlterTable*(T) object-scope change is superseded
57
+ // by the replacement and must be removed to avoid contradictions (e.g. an
58
+ // AlterTableDropColumn on a table that is about to be dropped) and the
59
+ // associated drop-phase cycle with the catalog constraint→column edge.
60
+ const tablesReplacedByExpansion = new Set();
59
61
  while (queue.length > 0) {
60
62
  const refId = queue.shift();
61
63
  const dependents = dependentsByReferenced.get(refId);
@@ -102,6 +104,12 @@ export function expandReplaceDependencies({ changes, mainCatalog, branchCatalog,
102
104
  if (!replacementChanges)
103
105
  continue;
104
106
  additions.push(...replacementChanges);
107
+ // If we added a DropTable(T) for an existing table, mark T so any
108
+ // pre-existing object-scope AlterTable*(T) changes get dropped below —
109
+ // the DropTable+CreateTable pair supersedes all structural alterations.
110
+ if (resolved.kind === "table" && addDrop) {
111
+ tablesReplacedByExpansion.add(targetId);
112
+ }
105
113
  // Track new creates/drops so we don't duplicate work for downstream dependents.
106
114
  for (const change of replacementChanges) {
107
115
  for (const id of change.creates ?? [])
@@ -112,9 +120,15 @@ export function expandReplaceDependencies({ changes, mainCatalog, branchCatalog,
112
120
  }
113
121
  }
114
122
  if (additions.length === 0) {
115
- return changes;
123
+ return {
124
+ changes,
125
+ replacedTableIds: tablesReplacedByExpansion,
126
+ };
116
127
  }
117
- return [...changes, ...additions];
128
+ return {
129
+ changes: [...changes, ...additions],
130
+ replacedTableIds: tablesReplacedByExpansion,
131
+ };
118
132
  }
119
133
  function isOwnedSequenceColumnDependency(referencedId, dependentId, mainCatalog, branchCatalog) {
120
134
  // When a sequence replace root is still OWNED BY the same column, the
@@ -59,16 +59,24 @@ export function diffSequences(ctx, main, branch, branchTables = {}) {
59
59
  }
60
60
  for (const sequenceId of dropped) {
61
61
  const sequence = main[sequenceId];
62
- // Skip generating DROP SEQUENCE if the sequence is owned by a table that's being dropped.
63
- // PostgreSQL automatically drops sequences owned by tables when the table is dropped,
64
- // so generating DROP SEQUENCE would cause an error (sequence doesn't exist).
62
+ // Skip generating DROP SEQUENCE if the sequence is owned by a table/column that's being dropped.
63
+ // PostgreSQL automatically cascades owned sequences when the owning table OR the owning
64
+ // column is dropped (via OWNED BY). Emitting DROP SEQUENCE in those cases would either
65
+ // fail at apply time (sequence already gone) or — in the column-drop case — create an
66
+ // unbreakable DropSequence ↔ AlterTableDropColumn cycle in the drop-phase sort graph.
65
67
  if (sequence.owned_by_schema &&
66
68
  sequence.owned_by_table &&
67
69
  sequence.owned_by_column) {
68
70
  const ownedByTableId = `table:${sequence.owned_by_schema}.${sequence.owned_by_table}`;
69
- // If the owning table doesn't exist in branch catalog, it's being dropped
70
- // and will auto-drop this sequence, so skip generating DROP SEQUENCE
71
- if (!(ownedByTableId in branchTables)) {
71
+ const ownedByTable = branchTables[ownedByTableId];
72
+ // Owning table is dropped → PG auto-drops the owned sequence.
73
+ if (!ownedByTable) {
74
+ continue;
75
+ }
76
+ // Owning column is dropped (table survives) → PG still auto-drops the owned
77
+ // sequence as part of the column drop, so we must not emit DROP SEQUENCE.
78
+ const ownedByColumnExists = ownedByTable.columns?.some((col) => col.name === sequence.owned_by_column);
79
+ if (!ownedByColumnExists) {
72
80
  continue;
73
81
  }
74
82
  }
@@ -14,9 +14,21 @@ import { DropTableChange } from "./table.base.ts";
14
14
  export declare class DropTable extends DropTableChange {
15
15
  readonly table: Table;
16
16
  readonly scope: "object";
17
+ /**
18
+ * Names of constraints on this table that are dropped explicitly by a
19
+ * separate `AlterTableDropConstraint` change. Those constraints must not be
20
+ * claimed by `DropTable.drops` / `.requires`, otherwise catalog edges tied
21
+ * to the constraint stableId will attach to this DropTable node instead of
22
+ * the dedicated AlterTableDropConstraint node. When two tables with mutual
23
+ * FK references are dropped in the same phase, that misattribution
24
+ * produces an unbreakable cycle between the two DropTable changes.
25
+ */
26
+ readonly externallyDroppedConstraints: ReadonlySet<string>;
17
27
  constructor(props: {
18
28
  table: Table;
29
+ externallyDroppedConstraints?: ReadonlySet<string>;
19
30
  });
31
+ private get claimedConstraints();
20
32
  get drops(): (`column:${string}.${string}.${string}` | `constraint:${string}.${string}.${string}` | `table:${string}`)[];
21
33
  get requires(): (`column:${string}.${string}.${string}` | `constraint:${string}.${string}.${string}` | `table:${string}`)[];
22
34
  serialize(_options?: SerializeOptions): string;
@@ -13,17 +13,34 @@ import { DropTableChange } from "./table.base.js";
13
13
  export class DropTable extends DropTableChange {
14
14
  table;
15
15
  scope = "object";
16
+ /**
17
+ * Names of constraints on this table that are dropped explicitly by a
18
+ * separate `AlterTableDropConstraint` change. Those constraints must not be
19
+ * claimed by `DropTable.drops` / `.requires`, otherwise catalog edges tied
20
+ * to the constraint stableId will attach to this DropTable node instead of
21
+ * the dedicated AlterTableDropConstraint node. When two tables with mutual
22
+ * FK references are dropped in the same phase, that misattribution
23
+ * produces an unbreakable cycle between the two DropTable changes.
24
+ */
25
+ externallyDroppedConstraints;
16
26
  constructor(props) {
17
27
  super();
18
28
  this.table = props.table;
29
+ this.externallyDroppedConstraints =
30
+ props.externallyDroppedConstraints ?? new Set();
31
+ }
32
+ get claimedConstraints() {
33
+ return this.table.constraints.filter((constraint) => !this.externallyDroppedConstraints.has(constraint.name));
19
34
  }
20
35
  get drops() {
21
36
  return [
22
37
  this.table.stableId,
23
38
  ...this.table.columns.map((column) => stableId.column(this.table.schema, this.table.name, column.name)),
24
39
  // Include constraint stableIds so FK relationships that only exist at the
25
- // constraint level still affect whole-table drop ordering.
26
- ...this.table.constraints.map((constraint) => stableId.constraint(this.table.schema, this.table.name, constraint.name)),
40
+ // constraint level still affect whole-table drop ordering. Skip any
41
+ // constraint that the diff layer is dropping via a dedicated
42
+ // AlterTableDropConstraint change — that node owns the stableId.
43
+ ...this.claimedConstraints.map((constraint) => stableId.constraint(this.table.schema, this.table.name, constraint.name)),
27
44
  ];
28
45
  }
29
46
  get requires() {
@@ -32,7 +49,7 @@ export class DropTable extends DropTableChange {
32
49
  ...this.table.columns.map((col) => stableId.column(this.table.schema, this.table.name, col.name)),
33
50
  // Mirror the dropped constraint ids in requires so drop-phase graph
34
51
  // consumers can connect catalog FK edges back to this table drop.
35
- ...this.table.constraints.map((constraint) => stableId.constraint(this.table.schema, this.table.name, constraint.name)),
52
+ ...this.claimedConstraints.map((constraint) => stableId.constraint(this.table.schema, this.table.name, constraint.name)),
36
53
  ];
37
54
  }
38
55
  serialize(_options) {
@@ -0,0 +1,22 @@
1
+ import type { Catalog } from "./catalog.model.ts";
2
+ import type { Change } from "./change.types.ts";
3
+ /**
4
+ * Normalize change-list cycles that only become apparent after all object
5
+ * diffs have been collected.
6
+ *
7
+ * This pass intentionally handles whole-plan interactions only:
8
+ * - If replace expansion added `DropTable(T)+CreateTable(T)`, targeted
9
+ * `AlterTableDropColumn(T.*)` / `AlterTableDropConstraint(T.*)` changes are
10
+ * redundant and create an unbreakable drop-phase cycle, so we elide them.
11
+ * - If two dropped tables reference each other via FK, we insert dedicated
12
+ * `AlterTableDropConstraint` changes and teach the paired `DropTable`
13
+ * changes not to claim those FK stable IDs.
14
+ *
15
+ * Object-local PostgreSQL semantics (for example owned-sequence cascades) stay
16
+ * in the corresponding `diff*` function instead of this pass.
17
+ */
18
+ export declare function normalizePostDiffCycles({ changes, mainCatalog, replacedTableIds, }: {
19
+ changes: Change[];
20
+ mainCatalog: Catalog;
21
+ replacedTableIds?: ReadonlySet<string>;
22
+ }): Change[];
@@ -0,0 +1,143 @@
1
+ import { AlterTableDropColumn, AlterTableDropConstraint, } from "./objects/table/changes/table.alter.js";
2
+ import { DropTable } from "./objects/table/changes/table.drop.js";
3
+ import { stableId } from "./objects/utils.js";
4
+ function constraintStableId(table, constraintName) {
5
+ return stableId.constraint(table.schema, table.name, constraintName);
6
+ }
7
+ /**
8
+ * Yield FK constraints on `table` whose referenced table is also dropped in the
9
+ * final plan. Self-references are left alone because the sort phase already
10
+ * handles the resulting self-loop correctly.
11
+ */
12
+ function* iterCrossDropFkConstraints(table, droppedSet) {
13
+ for (const constraint of table.constraints) {
14
+ if (constraint.constraint_type !== "f")
15
+ continue;
16
+ if (constraint.is_partition_clone)
17
+ continue;
18
+ if (!constraint.foreign_key_schema || !constraint.foreign_key_table) {
19
+ continue;
20
+ }
21
+ const referencedId = stableId.table(constraint.foreign_key_schema, constraint.foreign_key_table);
22
+ if (referencedId === table.stableId)
23
+ continue;
24
+ if (!droppedSet.has(referencedId))
25
+ continue;
26
+ yield { constraint, referencedId };
27
+ }
28
+ }
29
+ function isSupersededByTableReplacement(change, replacedTableIds) {
30
+ if (!(change instanceof AlterTableDropColumn) &&
31
+ !(change instanceof AlterTableDropConstraint)) {
32
+ return false;
33
+ }
34
+ return replacedTableIds.has(change.table.stableId);
35
+ }
36
+ function collectExplicitConstraintDropIds(changes) {
37
+ const explicitConstraintDropIds = new Set();
38
+ for (const change of changes) {
39
+ if (!(change instanceof AlterTableDropConstraint))
40
+ continue;
41
+ explicitConstraintDropIds.add(constraintStableId(change.table, change.constraint.name));
42
+ }
43
+ return explicitConstraintDropIds;
44
+ }
45
+ function hasSameEntries(left, right) {
46
+ if (left.size !== right.size)
47
+ return false;
48
+ for (const value of left) {
49
+ if (!right.has(value))
50
+ return false;
51
+ }
52
+ return true;
53
+ }
54
+ /**
55
+ * Normalize change-list cycles that only become apparent after all object
56
+ * diffs have been collected.
57
+ *
58
+ * This pass intentionally handles whole-plan interactions only:
59
+ * - If replace expansion added `DropTable(T)+CreateTable(T)`, targeted
60
+ * `AlterTableDropColumn(T.*)` / `AlterTableDropConstraint(T.*)` changes are
61
+ * redundant and create an unbreakable drop-phase cycle, so we elide them.
62
+ * - If two dropped tables reference each other via FK, we insert dedicated
63
+ * `AlterTableDropConstraint` changes and teach the paired `DropTable`
64
+ * changes not to claim those FK stable IDs.
65
+ *
66
+ * Object-local PostgreSQL semantics (for example owned-sequence cascades) stay
67
+ * in the corresponding `diff*` function instead of this pass.
68
+ */
69
+ export function normalizePostDiffCycles({ changes, mainCatalog, replacedTableIds = new Set(), }) {
70
+ const structurallyNormalizedChanges = replacedTableIds.size === 0
71
+ ? changes
72
+ : changes.filter((change) => !isSupersededByTableReplacement(change, replacedTableIds));
73
+ const dropTableChanges = structurallyNormalizedChanges.filter((change) => change instanceof DropTable);
74
+ if (dropTableChanges.length < 2) {
75
+ return structurallyNormalizedChanges;
76
+ }
77
+ const droppedSet = new Set(dropTableChanges.map((change) => change.table.stableId));
78
+ const droppedFkTargets = new Map();
79
+ for (const dropTableChange of dropTableChanges) {
80
+ const mainTable = mainCatalog.tables[dropTableChange.table.stableId] ??
81
+ dropTableChange.table;
82
+ const targets = new Set();
83
+ for (const { referencedId } of iterCrossDropFkConstraints(mainTable, droppedSet)) {
84
+ targets.add(referencedId);
85
+ }
86
+ droppedFkTargets.set(mainTable.stableId, targets);
87
+ }
88
+ const explicitConstraintDropIds = collectExplicitConstraintDropIds(structurallyNormalizedChanges);
89
+ const injectedConstraintDropsByTableId = new Map();
90
+ const externallyDroppedConstraintsByTableId = new Map();
91
+ let didMutate = structurallyNormalizedChanges !== changes;
92
+ for (const dropTableChange of dropTableChanges) {
93
+ const mainTable = mainCatalog.tables[dropTableChange.table.stableId] ??
94
+ dropTableChange.table;
95
+ const externallyDroppedConstraints = new Set(dropTableChange.externallyDroppedConstraints);
96
+ for (const { constraint, referencedId } of iterCrossDropFkConstraints(mainTable, droppedSet)) {
97
+ const isMutual = droppedFkTargets.get(referencedId)?.has(mainTable.stableId) === true;
98
+ if (!isMutual)
99
+ continue;
100
+ const droppedConstraintStableId = constraintStableId(mainTable, constraint.name);
101
+ externallyDroppedConstraints.add(constraint.name);
102
+ if (!explicitConstraintDropIds.has(droppedConstraintStableId)) {
103
+ const injectedDrop = new AlterTableDropConstraint({
104
+ table: mainTable,
105
+ constraint,
106
+ });
107
+ const existingDrops = injectedConstraintDropsByTableId.get(mainTable.stableId) ?? [];
108
+ existingDrops.push(injectedDrop);
109
+ injectedConstraintDropsByTableId.set(mainTable.stableId, existingDrops);
110
+ explicitConstraintDropIds.add(droppedConstraintStableId);
111
+ didMutate = true;
112
+ }
113
+ }
114
+ if (!hasSameEntries(dropTableChange.externallyDroppedConstraints, externallyDroppedConstraints)) {
115
+ externallyDroppedConstraintsByTableId.set(mainTable.stableId, externallyDroppedConstraints);
116
+ didMutate = true;
117
+ }
118
+ }
119
+ if (!didMutate) {
120
+ return changes;
121
+ }
122
+ const normalizedChanges = [];
123
+ for (const change of structurallyNormalizedChanges) {
124
+ if (!(change instanceof DropTable)) {
125
+ normalizedChanges.push(change);
126
+ continue;
127
+ }
128
+ const injectedConstraintDrops = injectedConstraintDropsByTableId.get(change.table.stableId) ?? [];
129
+ if (injectedConstraintDrops.length > 0) {
130
+ normalizedChanges.push(...injectedConstraintDrops);
131
+ }
132
+ const externallyDroppedConstraints = externallyDroppedConstraintsByTableId.get(change.table.stableId);
133
+ if (!externallyDroppedConstraints) {
134
+ normalizedChanges.push(change);
135
+ continue;
136
+ }
137
+ normalizedChanges.push(new DropTable({
138
+ table: change.table,
139
+ externallyDroppedConstraints,
140
+ }));
141
+ }
142
+ return normalizedChanges;
143
+ }
@@ -45,8 +45,38 @@ interface CreatePoolOptions extends Partial<PoolConfig> {
45
45
  }
46
46
  /**
47
47
  * Create a Pool with custom type handlers and optional event listeners.
48
+ *
49
+ * `connectionString` may be `undefined` when the caller needs pg to rely on
50
+ * explicit `host`/`port`/`user`/... fields from `options` instead — notably
51
+ * the bracketed-IPv6 workaround in {@link poolConfigFromUrl}, where passing
52
+ * the connection string would cause `pg-connection-string` to re-inject the
53
+ * bracketed host that breaks `getaddrinfo`.
54
+ */
55
+ export declare function createPool(connectionString: string | undefined, options?: CreatePoolOptions): Pool;
56
+ /**
57
+ * Build a pg {@link PoolConfig} from a cleaned connection URL.
58
+ *
59
+ * For most URLs this just returns `{ connectionString }` and pg does its
60
+ * normal parsing. But for URLs whose hostname is a bracketed IPv6 literal
61
+ * (e.g. `postgresql://user@[::1]:5432/db`, as produced by
62
+ * {@link normalizeConnectionUrl}), we expand the URL into explicit
63
+ * `host`/`port`/`user`/`password`/`database` fields with a **bare** IPv6
64
+ * host — no brackets.
65
+ *
66
+ * This works around a `pg-connection-string` quirk: its parser sets
67
+ * `config.host` to the WHATWG `URL.hostname`, which keeps the surrounding
68
+ * `[...]` for IPv6 literals. That bracketed value is then passed verbatim to
69
+ * `getaddrinfo`, which rejects it with `ENOTFOUND`. Since
70
+ * `pg`'s connection-parameters module does
71
+ * `Object.assign({}, config, parse(connectionString))`, any `host` we pass
72
+ * alongside `connectionString` gets clobbered — so we drop `connectionString`
73
+ * entirely on this path and hand pg the parsed fields directly.
74
+ *
75
+ * Remaining query parameters (e.g. `application_name`, `options`,
76
+ * `connect_timeout`) are forwarded as top-level config keys, mirroring how
77
+ * `pg-connection-string` would normally surface them.
48
78
  */
49
- export declare function createPool(connectionString: string, options?: CreatePoolOptions): Pool;
79
+ export declare function poolConfigFromUrl(cleanedUrl: string): PoolConfig;
50
80
  /**
51
81
  * End a pool and wait for all client sockets to fully close.
52
82
  *
@@ -182,11 +182,17 @@ export async function connectWithRetry(opts) {
182
182
  }
183
183
  /**
184
184
  * Create a Pool with custom type handlers and optional event listeners.
185
+ *
186
+ * `connectionString` may be `undefined` when the caller needs pg to rely on
187
+ * explicit `host`/`port`/`user`/... fields from `options` instead — notably
188
+ * the bracketed-IPv6 workaround in {@link poolConfigFromUrl}, where passing
189
+ * the connection string would cause `pg-connection-string` to re-inject the
190
+ * bracketed host that breaks `getaddrinfo`.
185
191
  */
186
192
  export function createPool(connectionString, options) {
187
193
  const { onConnect, onError, onAcquire, onRemove, ...config } = options ?? {};
188
194
  const pool = new Pool({
189
- connectionString,
195
+ ...(connectionString ? { connectionString } : {}),
190
196
  max: DEFAULT_POOL_MAX,
191
197
  connectionTimeoutMillis: DEFAULT_CONNECTION_TIMEOUT_MS,
192
198
  ...config,
@@ -242,6 +248,51 @@ export function createPool(connectionString, options) {
242
248
  pool.on("remove", onRemove);
243
249
  return pool;
244
250
  }
251
+ /**
252
+ * Build a pg {@link PoolConfig} from a cleaned connection URL.
253
+ *
254
+ * For most URLs this just returns `{ connectionString }` and pg does its
255
+ * normal parsing. But for URLs whose hostname is a bracketed IPv6 literal
256
+ * (e.g. `postgresql://user@[::1]:5432/db`, as produced by
257
+ * {@link normalizeConnectionUrl}), we expand the URL into explicit
258
+ * `host`/`port`/`user`/`password`/`database` fields with a **bare** IPv6
259
+ * host — no brackets.
260
+ *
261
+ * This works around a `pg-connection-string` quirk: its parser sets
262
+ * `config.host` to the WHATWG `URL.hostname`, which keeps the surrounding
263
+ * `[...]` for IPv6 literals. That bracketed value is then passed verbatim to
264
+ * `getaddrinfo`, which rejects it with `ENOTFOUND`. Since
265
+ * `pg`'s connection-parameters module does
266
+ * `Object.assign({}, config, parse(connectionString))`, any `host` we pass
267
+ * alongside `connectionString` gets clobbered — so we drop `connectionString`
268
+ * entirely on this path and hand pg the parsed fields directly.
269
+ *
270
+ * Remaining query parameters (e.g. `application_name`, `options`,
271
+ * `connect_timeout`) are forwarded as top-level config keys, mirroring how
272
+ * `pg-connection-string` would normally surface them.
273
+ */
274
+ export function poolConfigFromUrl(cleanedUrl) {
275
+ const urlObj = new URL(cleanedUrl);
276
+ if (!urlObj.hostname.startsWith("[")) {
277
+ return { connectionString: cleanedUrl };
278
+ }
279
+ const config = {
280
+ host: urlObj.hostname.slice(1, -1),
281
+ };
282
+ if (urlObj.port)
283
+ config.port = Number(urlObj.port);
284
+ if (urlObj.username)
285
+ config.user = decodeURIComponent(urlObj.username);
286
+ if (urlObj.password)
287
+ config.password = decodeURIComponent(urlObj.password);
288
+ if (urlObj.pathname.length > 1) {
289
+ config.database = decodeURIComponent(urlObj.pathname.slice(1));
290
+ }
291
+ for (const [key, value] of urlObj.searchParams) {
292
+ config[key] = value;
293
+ }
294
+ return config;
295
+ }
245
296
  /**
246
297
  * End a pool and wait for all client sockets to fully close.
247
298
  *
@@ -269,7 +320,11 @@ export async function createManagedPool(url, options) {
269
320
  // Non-IPv6 hosts are returned unchanged.
270
321
  const normalizedUrl = normalizeConnectionUrl(url);
271
322
  const sslConfig = await parseSslConfig(normalizedUrl, options?.label ?? "target");
272
- const pool = createPool(sslConfig.cleanedUrl, {
323
+ // Expand bracketed-IPv6 URLs into explicit pg fields so the brackets never
324
+ // reach `getaddrinfo` — see `poolConfigFromUrl` for the full rationale.
325
+ const connectionConfig = poolConfigFromUrl(sslConfig.cleanedUrl);
326
+ const pool = createPool(connectionConfig.connectionString, {
327
+ ...connectionConfig,
273
328
  ...(sslConfig.ssl !== undefined ? { ssl: sslConfig.ssl } : {}),
274
329
  onError: (err) => {
275
330
  if (err.code !== "57P01") {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@supabase/pg-delta",
3
- "version": "1.0.0-alpha.14",
3
+ "version": "1.0.0-alpha.16",
4
4
  "description": "PostgreSQL migrations made easy",
5
5
  "type": "module",
6
6
  "sideEffects": false,
@@ -1,6 +1,7 @@
1
1
  import debug from "debug";
2
2
  import type { Catalog } from "./catalog.model.ts";
3
3
  import { expandReplaceDependencies } from "./expand-replace-dependencies.ts";
4
+ import { normalizePostDiffCycles } from "./post-diff-cycle-breaking.ts";
4
5
 
5
6
  const debugCatalog = debug("pg-delta:catalog");
6
7
 
@@ -232,11 +233,16 @@ export function diffCatalogs(
232
233
  return true;
233
234
  });
234
235
 
235
- filteredChanges = expandReplaceDependencies({
236
+ const expandedDependencies = expandReplaceDependencies({
236
237
  changes: filteredChanges,
237
238
  mainCatalog: main,
238
239
  branchCatalog: branch,
239
240
  });
241
+ filteredChanges = normalizePostDiffCycles({
242
+ changes: expandedDependencies.changes,
243
+ mainCatalog: main,
244
+ replacedTableIds: expandedDependencies.replacedTableIds,
245
+ });
240
246
 
241
247
  debugCatalog(
242
248
  "changes catalog diff: %O",