@supabase/pg-delta 1.0.0-alpha.12 → 1.0.0-alpha.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30) hide show
  1. package/dist/core/connection-url.d.ts +32 -0
  2. package/dist/core/connection-url.js +77 -0
  3. package/dist/core/export/index.d.ts +2 -2
  4. package/dist/core/export/index.js +4 -1
  5. package/dist/core/integrations/integration.types.d.ts +26 -1
  6. package/dist/core/integrations/integration.types.js +31 -1
  7. package/dist/core/integrations/supabase.js +1 -0
  8. package/dist/core/objects/procedure/procedure.diff.js +8 -0
  9. package/dist/core/objects/table/changes/table.alter.js +4 -1
  10. package/dist/core/objects/table/table.diff.js +7 -2
  11. package/dist/core/plan/create.js +5 -17
  12. package/dist/core/plan/types.d.ts +3 -6
  13. package/dist/core/postgres-config.d.ts +27 -0
  14. package/dist/core/postgres-config.js +99 -7
  15. package/package.json +2 -1
  16. package/src/core/connection-url.test.ts +142 -0
  17. package/src/core/connection-url.ts +82 -0
  18. package/src/core/export/index.ts +13 -4
  19. package/src/core/integrations/integration.types.ts +59 -1
  20. package/src/core/integrations/supabase.ts +1 -0
  21. package/src/core/objects/procedure/procedure.diff.test.ts +25 -0
  22. package/src/core/objects/procedure/procedure.diff.ts +12 -0
  23. package/src/core/objects/table/changes/table.alter.test.ts +14 -0
  24. package/src/core/objects/table/changes/table.alter.ts +4 -1
  25. package/src/core/objects/table/table.diff.test.ts +55 -0
  26. package/src/core/objects/table/table.diff.ts +10 -2
  27. package/src/core/plan/create.ts +11 -27
  28. package/src/core/plan/types.ts +3 -6
  29. package/src/core/postgres-config.test.ts +241 -0
  30. package/src/core/postgres-config.ts +127 -16
@@ -0,0 +1,32 @@
1
+ /**
2
+ * Connection URL normalization for pg-delta.
3
+ *
4
+ * Auto-normalizes percent-encoded IPv6 hosts in PostgreSQL connection URLs.
5
+ * A URL like `postgresql://user:pass@2406%3Ada18%3A...%3Ab3c9:5432/db`
6
+ * becomes `postgresql://user:pass@[2406:da18:...:b3c9]:5432/db` before it
7
+ * reaches `pg-connection-string` / `pg.Pool`, so DNS resolution sees the
8
+ * address in its canonical bracketed form.
9
+ *
10
+ * Non-IPv6 hosts (IPv4, DNS names, already-bracketed IPv6, partial fragments
11
+ * that just happen to contain `%3A`) are returned verbatim.
12
+ */
13
+ /**
14
+ * Return true if `value` is a valid IPv6 literal in any canonical form:
15
+ * full 8-group, `::` compression, or IPv4-mapped (`::ffff:1.2.3.4`).
16
+ * RFC 4007 zone identifiers (`fe80::1%eth0`) are accepted.
17
+ */
18
+ export declare function isIPv6(value: string): boolean;
19
+ /**
20
+ * Normalize a PostgreSQL connection URL so IPv6 hosts reach pg in the
21
+ * canonical bracketed form.
22
+ *
23
+ * If the URL's hostname contains a percent-encoded colon AND the decoded
24
+ * hostname is a valid IPv6 literal, the hostname is decoded and wrapped in
25
+ * `[...]`. All other fields (scheme, userinfo, port, path, query, fragment)
26
+ * are preserved byte-for-byte from the input.
27
+ *
28
+ * Any URL whose decoded hostname does not validate as IPv6 is returned
29
+ * verbatim, so a malformed input will surface its usual downstream error
30
+ * instead of being silently rewritten.
31
+ */
32
+ export declare function normalizeConnectionUrl(url: string): string;
@@ -0,0 +1,77 @@
1
+ /**
2
+ * Connection URL normalization for pg-delta.
3
+ *
4
+ * Auto-normalizes percent-encoded IPv6 hosts in PostgreSQL connection URLs.
5
+ * A URL like `postgresql://user:pass@2406%3Ada18%3A...%3Ab3c9:5432/db`
6
+ * becomes `postgresql://user:pass@[2406:da18:...:b3c9]:5432/db` before it
7
+ * reaches `pg-connection-string` / `pg.Pool`, so DNS resolution sees the
8
+ * address in its canonical bracketed form.
9
+ *
10
+ * Non-IPv6 hosts (IPv4, DNS names, already-bracketed IPv6, partial fragments
11
+ * that just happen to contain `%3A`) are returned verbatim.
12
+ */
13
+ // IPv6 detection regex vendored from ip-regex (Sindre Sorhus, MIT).
14
+ // https://github.com/sindresorhus/ip-regex
15
+ const v4 = "(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]\\d|\\d)(?:\\.(?:25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]\\d|\\d)){3}";
16
+ const v6seg = "[a-fA-F\\d]{1,4}";
17
+ const v6 = `
18
+ (?:
19
+ (?:${v6seg}:){7}(?:${v6seg}|:)|
20
+ (?:${v6seg}:){6}(?:${v4}|:${v6seg}|:)|
21
+ (?:${v6seg}:){5}(?::${v4}|(?::${v6seg}){1,2}|:)|
22
+ (?:${v6seg}:){4}(?:(?::${v6seg}){0,1}:${v4}|(?::${v6seg}){1,3}|:)|
23
+ (?:${v6seg}:){3}(?:(?::${v6seg}){0,2}:${v4}|(?::${v6seg}){1,4}|:)|
24
+ (?:${v6seg}:){2}(?:(?::${v6seg}){0,3}:${v4}|(?::${v6seg}){1,5}|:)|
25
+ (?:${v6seg}:){1}(?:(?::${v6seg}){0,4}:${v4}|(?::${v6seg}){1,6}|:)|
26
+ (?::(?:(?::${v6seg}){0,5}:${v4}|(?::${v6seg}){1,7}|:))
27
+ )(?:%[0-9a-zA-Z]{1,})?
28
+ `
29
+ .replace(/\s*\/\/.*$/gm, "")
30
+ .replace(/\n/g, "")
31
+ .trim();
32
+ const V6_EXACT = new RegExp(`^${v6}$`);
33
+ /**
34
+ * Return true if `value` is a valid IPv6 literal in any canonical form:
35
+ * full 8-group, `::` compression, or IPv4-mapped (`::ffff:1.2.3.4`).
36
+ * RFC 4007 zone identifiers (`fe80::1%eth0`) are accepted.
37
+ */
38
+ export function isIPv6(value) {
39
+ return typeof value === "string" && V6_EXACT.test(value);
40
+ }
41
+ /**
42
+ * Normalize a PostgreSQL connection URL so IPv6 hosts reach pg in the
43
+ * canonical bracketed form.
44
+ *
45
+ * If the URL's hostname contains a percent-encoded colon AND the decoded
46
+ * hostname is a valid IPv6 literal, the hostname is decoded and wrapped in
47
+ * `[...]`. All other fields (scheme, userinfo, port, path, query, fragment)
48
+ * are preserved byte-for-byte from the input.
49
+ *
50
+ * Any URL whose decoded hostname does not validate as IPv6 is returned
51
+ * verbatim, so a malformed input will surface its usual downstream error
52
+ * instead of being silently rewritten.
53
+ */
54
+ export function normalizeConnectionUrl(url) {
55
+ const urlObj = new URL(url);
56
+ // Cheap pre-filter: only look closer if the hostname contains a
57
+ // percent-encoded colon. Anything else is left entirely untouched.
58
+ if (!/%3[aA]/.test(urlObj.hostname))
59
+ return url;
60
+ const decodedHost = decodeURIComponent(urlObj.hostname);
61
+ // Authoritative validation: only normalize when the decoded string is a
62
+ // real IPv6 literal. Rejects partial fragments, random hostnames that
63
+ // happen to contain `%3A`, and any malformed input.
64
+ if (!isIPv6(decodedHost))
65
+ return url;
66
+ // Preserve username/password/port/path/search/hash exactly as they appear
67
+ // in the WHATWG URL model (these are returned already percent-encoded).
68
+ const scheme = `${urlObj.protocol}//`;
69
+ const auth = urlObj.username
70
+ ? urlObj.password
71
+ ? `${urlObj.username}:${urlObj.password}@`
72
+ : `${urlObj.username}@`
73
+ : "";
74
+ const port = urlObj.port ? `:${urlObj.port}` : "";
75
+ const tail = `${urlObj.pathname}${urlObj.search}${urlObj.hash}`;
76
+ return `${scheme}${auth}[${decodedHost}]${port}${tail}`;
77
+ }
@@ -1,7 +1,7 @@
1
1
  /**
2
2
  * Declarative schema export.
3
3
  */
4
- import type { Integration } from "../integrations/integration.types.ts";
4
+ import { type Integration } from "../integrations/integration.types.ts";
5
5
  import type { createPlan } from "../plan/create.ts";
6
6
  import type { SqlFormatOptions } from "../plan/sql-format/types.ts";
7
7
  import type { DeclarativeSchemaOutput, Grouping } from "./types.ts";
@@ -12,7 +12,7 @@ import type { DeclarativeSchemaOutput, Grouping } from "./types.ts";
12
12
  type PlanResult = NonNullable<Awaited<ReturnType<typeof createPlan>>>;
13
13
  export interface ExportOptions {
14
14
  /** Integration for custom serialization */
15
- integration?: Integration;
15
+ integration?: Pick<Integration, "serialize">;
16
16
  /**
17
17
  * SQL formatter options to control the output style.
18
18
  * Merged on top of the default export options (maxWidth: 180, keywordCase: "upper").
@@ -2,6 +2,7 @@
2
2
  * Declarative schema export.
3
3
  */
4
4
  import { buildPlanScopeFingerprint, hashStableIds } from "../fingerprint.js";
5
+ import { resolveIntegration, } from "../integrations/integration.types.js";
5
6
  import { DEFAULT_OPTIONS } from "../plan/sql-format/constants.js";
6
7
  import { formatSqlScript } from "../plan/statements.js";
7
8
  import { createFileMapper } from "./file-mapper.js";
@@ -22,7 +23,9 @@ import { groupChangesByFile } from "./grouper.js";
22
23
  */
23
24
  export function exportDeclarativeSchema(planResult, options) {
24
25
  const { ctx, sortedChanges } = planResult;
25
- const integration = options?.integration;
26
+ const integration = options?.integration
27
+ ? resolveIntegration(options?.integration)
28
+ : {};
26
29
  const formatOptions = options?.formatOptions === null
27
30
  ? undefined
28
31
  : {
@@ -1,6 +1,31 @@
1
+ import { type FilterDSL } from "./filter/dsl.ts";
1
2
  import type { ChangeFilter } from "./filter/filter.types.ts";
3
+ import { type SerializeDSL } from "./serialize/dsl.ts";
2
4
  import type { ChangeSerializer } from "./serialize/serialize.types.ts";
3
- export type Integration = {
5
+ /**
6
+ * A resolved integration is an integration that has been compiled to a function.
7
+ */
8
+ export type ResolvedIntegration = {
4
9
  filter?: ChangeFilter;
5
10
  serialize?: ChangeSerializer;
6
11
  };
12
+ /**
13
+ * A raw integration is an integration that has not been compiled to a function.
14
+ */
15
+ export type IntegrationDSL = {
16
+ filter?: FilterDSL;
17
+ serialize?: SerializeDSL;
18
+ };
19
+ /**
20
+ * An integration whose fields may each be either a resolved function or its uncompiled DSL form.
21
+ */
22
+ export type Integration = {
23
+ filter?: ResolvedIntegration["filter"] | IntegrationDSL["filter"];
24
+ serialize?: ResolvedIntegration["serialize"] | IntegrationDSL["serialize"];
25
+ };
26
+ /**
27
+ * Resolve an integration (either in DSL form or already resolved) into a ResolvedIntegration.
28
+ * @param integration - The integration to resolve.
29
+ * @returns The resolved integration.
30
+ */
31
+ export declare function resolveIntegration(integration: Integration): ResolvedIntegration | undefined;
@@ -1 +1,31 @@
1
- export {};
1
+ import { compileFilterDSL } from "./filter/dsl.js";
2
+ import { compileSerializeDSL } from "./serialize/dsl.js";
3
+ /**
4
+ * Resolve an integration (either in DSL form or already resolved) into a ResolvedIntegration.
5
+ * @param integration - The integration to resolve.
6
+ * @returns The resolved integration.
7
+ */
8
+ export function resolveIntegration(integration) {
9
+ // Determine if filter/serialize are DSL or functions, and extract DSL for storage
10
+ const isFilterDSL = integration.filter && typeof integration.filter !== "function";
11
+ const isSerializeDSL = integration.serialize && typeof integration.serialize !== "function";
12
+ const filterDSL = isFilterDSL ? integration.filter : undefined;
13
+ const serializeDSL = isSerializeDSL
14
+ ? integration.serialize
15
+ : undefined;
16
+ // Build final integration: compile DSL if needed, use functions directly otherwise
17
+ if (integration.filter || integration.serialize) {
18
+ return {
19
+ filter: typeof integration.filter === "function"
20
+ ? integration.filter
21
+ : filterDSL
22
+ ? compileFilterDSL(filterDSL)
23
+ : undefined,
24
+ serialize: typeof integration.serialize === "function"
25
+ ? integration.serialize
26
+ : serializeDSL
27
+ ? compileSerializeDSL(serializeDSL)
28
+ : undefined,
29
+ };
30
+ }
31
+ }
@@ -13,6 +13,7 @@ const SUPABASE_SYSTEM_SCHEMAS = [
13
13
  "_supavisor",
14
14
  "auth",
15
15
  "cron",
16
+ "etl",
16
17
  "extensions",
17
18
  "graphql",
18
19
  "graphql_public",
@@ -99,6 +99,14 @@ export function diffProcedures(ctx, main, branch) {
99
99
  if (nonAlterablePropsChanged) {
100
100
  // Replace the entire procedure
101
101
  changes.push(new CreateProcedure({ procedure: branchProcedure, orReplace: true }));
102
+ if (mainProcedure.comment !== branchProcedure.comment) {
103
+ if (branchProcedure.comment === null) {
104
+ changes.push(new DropCommentOnProcedure({ procedure: mainProcedure }));
105
+ }
106
+ else {
107
+ changes.push(new CreateCommentOnProcedure({ procedure: branchProcedure }));
108
+ }
109
+ }
102
110
  }
103
111
  else {
104
112
  // Only alterable properties changed - check each one
@@ -462,13 +462,16 @@ export class AlterTableAlterColumnSetDefault extends AlterTableChange {
462
462
  }
463
463
  serialize(_options) {
464
464
  const set = this.column.is_generated ? "SET EXPRESSION AS" : "SET DEFAULT";
465
+ const value = this.column.is_generated
466
+ ? `(${this.column.default ?? "NULL"})`
467
+ : (this.column.default ?? "NULL");
465
468
  return [
466
469
  "ALTER TABLE",
467
470
  `${this.table.schema}.${this.table.name}`,
468
471
  "ALTER COLUMN",
469
472
  this.column.name,
470
473
  set,
471
- this.column.default ?? "NULL",
474
+ value,
472
475
  ].join(" ");
473
476
  }
474
477
  }
@@ -486,9 +486,14 @@ export function diffTables(ctx, main, branch) {
486
486
  // Set new default value
487
487
  const isGeneratedColumn = branchCol.is_generated;
488
488
  const isPostgresLowerThan17 = ctx.version < 170000;
489
- if (isGeneratedColumn && isPostgresLowerThan17) {
489
+ const generatedStatusChanged = mainCol.is_generated !== branchCol.is_generated;
490
+ if (isGeneratedColumn &&
491
+ (isPostgresLowerThan17 || generatedStatusChanged)) {
490
492
  // For generated columns in < PostgreSQL 17, we need to drop and recreate
491
- // instead of using SET EXPRESSION AS for computed columns
493
+ // instead of using SET EXPRESSION AS for computed columns. We also
494
+ // need to recreate the column when switching between regular and
495
+ // generated states because SET EXPRESSION only applies to existing
496
+ // generated columns.
492
497
  // cf: https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=5d06e99a3
493
498
  // cf: https://www.postgresql.org/docs/release/17.0/
494
499
  // > Allow ALTER TABLE to change a column's generation expression
@@ -5,8 +5,7 @@ import { escapeIdentifier } from "pg";
5
5
  import { diffCatalogs } from "../catalog.diff.js";
6
6
  import { createEmptyCatalog, extractCatalog } from "../catalog.model.js";
7
7
  import { buildPlanScopeFingerprint, hashStableIds } from "../fingerprint.js";
8
- import { compileFilterDSL, } from "../integrations/filter/dsl.js";
9
- import { compileSerializeDSL, } from "../integrations/serialize/dsl.js";
8
+ import { resolveIntegration, } from "../integrations/integration.types.js";
10
9
  import { createManagedPool, endPool } from "../postgres-config.js";
11
10
  import { sortChanges } from "../sort/sort-changes.js";
12
11
  import { classifyChangesRisk } from "./risk.js";
@@ -103,21 +102,10 @@ function buildPlanForCatalogs(fromCatalog, toCatalog, options = {}) {
103
102
  ? serializeOption
104
103
  : undefined;
105
104
  // Build final integration: compile DSL if needed, use functions directly otherwise
106
- let finalIntegration;
107
- if (filterOption || serializeOption) {
108
- finalIntegration = {
109
- filter: typeof filterOption === "function"
110
- ? filterOption
111
- : filterDSL
112
- ? compileFilterDSL(filterDSL)
113
- : undefined,
114
- serialize: typeof serializeOption === "function"
115
- ? serializeOption
116
- : serializeDSL
117
- ? compileSerializeDSL(serializeDSL)
118
- : undefined,
119
- };
120
- }
105
+ const finalIntegration = resolveIntegration({
106
+ filter: filterOption,
107
+ serialize: serializeOption,
108
+ });
121
109
  // Use filter from final integration
122
110
  const filterFn = finalIntegration?.filter;
123
111
  let filteredChanges = filterFn
@@ -3,10 +3,7 @@
3
3
  */
4
4
  import z from "zod";
5
5
  import type { Change } from "../change.types.ts";
6
- import type { FilterDSL } from "../integrations/filter/dsl.ts";
7
- import type { ChangeFilter } from "../integrations/filter/filter.types.ts";
8
- import type { SerializeDSL } from "../integrations/serialize/dsl.ts";
9
- import type { ChangeSerializer } from "../integrations/serialize/serialize.types.ts";
6
+ import type { Integration } from "../integrations/integration.types.ts";
10
7
  export type PlanRisk = {
11
8
  level: "safe";
12
9
  } | {
@@ -133,9 +130,9 @@ export type Plan = z.infer<typeof PlanSchema>;
133
130
  */
134
131
  export interface CreatePlanOptions {
135
132
  /** Filter - either FilterDSL (stored in plan) or ChangeFilter function (not stored) */
136
- filter?: FilterDSL | ChangeFilter;
133
+ filter?: Integration["filter"];
137
134
  /** Serialize - either SerializeDSL (stored in plan) or ChangeSerializer function (not stored) */
138
- serialize?: SerializeDSL | ChangeSerializer;
135
+ serialize?: Integration["serialize"];
139
136
  /** Role to use when executing the migration (SET ROLE will be added to statements) */
140
137
  role?: string;
141
138
  /**
@@ -3,6 +3,33 @@
3
3
  */
4
4
  import type { ClientBase, PoolClient, PoolConfig } from "pg";
5
5
  import { Pool } from "pg";
6
+ /**
7
+ * Return true when `err` represents a transient connect failure that makes
8
+ * sense to retry with backoff (e.g. refused connections, DNS blips, our own
9
+ * eager-connect timeout wrapper). Returns false for permanent failures such
10
+ * as authentication errors, TLS negotiation errors, and `ENOTFOUND`.
11
+ *
12
+ * Unknown errors are treated as retryable on purpose: transient-by-default
13
+ * is safer here because a duplicated retry is strictly cheaper than a spurious
14
+ * hard failure during catalog extraction.
15
+ */
16
+ export declare function isRetryableConnectError(err: unknown): boolean;
17
+ /**
18
+ * Retry an async `connect` operation with bounded exponential backoff.
19
+ * Stops immediately on a non-retryable error. On exhausted attempts, throws
20
+ * the last observed error.
21
+ *
22
+ * Exposed for testing — production call sites always go through
23
+ * {@link createManagedPool}.
24
+ */
25
+ export declare function connectWithRetry<T>(opts: {
26
+ connect: (attempt: number) => Promise<T>;
27
+ isRetryable?: (err: unknown) => boolean;
28
+ maxAttempts?: number;
29
+ baseBackoffMs?: number;
30
+ maxBackoffMs?: number;
31
+ sleep?: (ms: number) => Promise<void>;
32
+ }): Promise<T>;
6
33
  /**
7
34
  * Options for creating a Pool with event listeners.
8
35
  */
@@ -2,6 +2,7 @@
2
2
  * PostgreSQL connection configuration with custom type handlers.
3
3
  */
4
4
  import { escapeIdentifier, Pool, types } from "pg";
5
+ import { normalizeConnectionUrl } from "./connection-url.js";
5
6
  import { parseSslConfig } from "./plan/ssl-config.js";
6
7
  // ============================================================================
7
8
  // Array Parser
@@ -96,6 +97,89 @@ types.setTypeParser(1016, (val) => parseArray(val, parseIntElement)); // int8[]
96
97
  const DEFAULT_POOL_MAX = Number(process.env.PGDELTA_POOL_MAX) || 5;
97
98
  const DEFAULT_CONNECTION_TIMEOUT_MS = Number(process.env.PGDELTA_CONNECTION_TIMEOUT_MS) || 3_000;
98
99
  const DEFAULT_CONNECT_TIMEOUT_MS = Number(process.env.PGDELTA_CONNECT_TIMEOUT_MS) || 2_500;
100
+ const DEFAULT_CONNECT_MAX_ATTEMPTS = Number(process.env.PGDELTA_CONNECT_MAX_ATTEMPTS) || 3;
101
+ const DEFAULT_CONNECT_BASE_BACKOFF_MS = Number(process.env.PGDELTA_CONNECT_BASE_BACKOFF_MS) || 250;
102
+ const DEFAULT_CONNECT_MAX_BACKOFF_MS = Number(process.env.PGDELTA_CONNECT_MAX_BACKOFF_MS) || 1_000;
103
+ // PostgreSQL auth-class SQLSTATE codes: not retryable.
104
+ const NON_RETRYABLE_PG_CODES = new Set([
105
+ "28000", // invalid_authorization_specification
106
+ "28P01", // invalid_password
107
+ "28P02", // pgdelta: alias reserved here to future-proof against new auth codes
108
+ ]);
109
+ // Non-retryable TLS/SSL markers. The `pg` driver surfaces TLS failures as
110
+ // either plain Node `Error` instances with a code on `ERR_TLS_*` or error
111
+ // messages that include well-known cert/TLS terminology; we match both
112
+ // because node-pg normalises some of these.
113
+ const TLS_MESSAGE_MARKERS = [
114
+ "self-signed certificate",
115
+ "self signed certificate",
116
+ "unable to verify the first certificate",
117
+ "certificate has expired",
118
+ "tls",
119
+ "ssl",
120
+ ];
121
+ /**
122
+ * Return true when `err` represents a transient connect failure that makes
123
+ * sense to retry with backoff (e.g. refused connections, DNS blips, our own
124
+ * eager-connect timeout wrapper). Returns false for permanent failures such
125
+ * as authentication errors, TLS negotiation errors, and `ENOTFOUND`.
126
+ *
127
+ * Unknown errors are treated as retryable on purpose: transient-by-default
128
+ * is safer here because a duplicated retry is strictly cheaper than a spurious
129
+ * hard failure during catalog extraction.
130
+ */
131
+ export function isRetryableConnectError(err) {
132
+ if (!(err instanceof Error))
133
+ return true;
134
+ const code = err.code;
135
+ if (code && NON_RETRYABLE_PG_CODES.has(code))
136
+ return false;
137
+ if (code === "ENOTFOUND")
138
+ return false;
139
+ if (code && typeof code === "string" && code.startsWith("ERR_TLS")) {
140
+ return false;
141
+ }
142
+ const message = err.message?.toLowerCase() ?? "";
143
+ // Our own eager-connect timeout wrapper is retryable (flaky network).
144
+ if (message.includes("timed out after"))
145
+ return true;
146
+ for (const marker of TLS_MESSAGE_MARKERS) {
147
+ if (message.includes(marker))
148
+ return false;
149
+ }
150
+ return true;
151
+ }
152
+ /**
153
+ * Retry an async `connect` operation with bounded exponential backoff.
154
+ * Stops immediately on a non-retryable error. On exhausted attempts, throws
155
+ * the last observed error.
156
+ *
157
+ * Exposed for testing — production call sites always go through
158
+ * {@link createManagedPool}.
159
+ */
160
+ export async function connectWithRetry(opts) {
161
+ const maxAttempts = opts.maxAttempts ?? DEFAULT_CONNECT_MAX_ATTEMPTS;
162
+ const baseBackoffMs = opts.baseBackoffMs ?? DEFAULT_CONNECT_BASE_BACKOFF_MS;
163
+ const maxBackoffMs = opts.maxBackoffMs ?? DEFAULT_CONNECT_MAX_BACKOFF_MS;
164
+ const isRetryable = opts.isRetryable ?? isRetryableConnectError;
165
+ const sleep = opts.sleep ?? ((ms) => new Promise((r) => setTimeout(r, ms)));
166
+ let lastError;
167
+ for (let attempt = 1; attempt <= maxAttempts; attempt++) {
168
+ try {
169
+ return await opts.connect(attempt);
170
+ }
171
+ catch (err) {
172
+ lastError = err;
173
+ if (attempt >= maxAttempts || !isRetryable(err)) {
174
+ throw err;
175
+ }
176
+ const backoff = Math.min(baseBackoffMs * 2 ** (attempt - 1), maxBackoffMs);
177
+ await sleep(backoff);
178
+ }
179
+ }
180
+ // Unreachable: loop either returns or throws.
181
+ throw lastError;
182
+ }
99
183
  /**
100
184
  * Create a Pool with custom type handlers and optional event listeners.
101
185
  */
@@ -180,7 +264,11 @@ export function createPool(connectionString, options) {
180
264
  * to close (via {@link endPool}).
181
265
  */
182
266
  export async function createManagedPool(url, options) {
183
- const sslConfig = await parseSslConfig(url, options?.label ?? "target");
267
+ // Normalize percent-encoded IPv6 hosts (e.g. `2406%3A...%3Ab3c9`) into the
268
+ // canonical bracketed form before the URL reaches `parseSslConfig` or pg.
269
+ // Non-IPv6 hosts are returned unchanged.
270
+ const normalizedUrl = normalizeConnectionUrl(url);
271
+ const sslConfig = await parseSslConfig(normalizedUrl, options?.label ?? "target");
184
272
  const pool = createPool(sslConfig.cleanedUrl, {
185
273
  ...(sslConfig.ssl !== undefined ? { ssl: sslConfig.ssl } : {}),
186
274
  onError: (err) => {
@@ -197,15 +285,19 @@ export async function createManagedPool(url, options) {
197
285
  });
198
286
  // Eagerly validate connectivity so SSL/auth failures surface immediately
199
287
  // instead of hanging on the first real query. node-pg's connectionTimeoutMillis
200
- // is not reliably enforced under Bun when SSL negotiation hangs.
288
+ // is not reliably enforced under Bun when SSL negotiation hangs. Transient
289
+ // failures (refused connections, flaky DNS, our own timeout wrapper) are
290
+ // retried with bounded exponential backoff; auth/TLS/ENOTFOUND fail fast.
201
291
  const label = options?.label ?? "target";
202
292
  const timeoutMs = DEFAULT_CONNECT_TIMEOUT_MS;
203
293
  try {
204
- const client = await Promise.race([
205
- pool.connect(),
206
- new Promise((_, reject) => setTimeout(() => reject(new Error(`Connection to ${label} database timed out after ${timeoutMs}ms. ` +
207
- `The server may require SSL, use an invalid certificate, or be unreachable.`)), timeoutMs)),
208
- ]);
294
+ const client = await connectWithRetry({
295
+ connect: () => Promise.race([
296
+ pool.connect(),
297
+ new Promise((_, reject) => setTimeout(() => reject(new Error(`Connection to ${label} database timed out after ${timeoutMs}ms. ` +
298
+ `The server may require SSL, use an invalid certificate, or be unreachable.`)), timeoutMs)),
299
+ ]),
300
+ });
209
301
  client.release();
210
302
  }
211
303
  catch (err) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@supabase/pg-delta",
3
- "version": "1.0.0-alpha.12",
3
+ "version": "1.0.0-alpha.14",
4
4
  "description": "PostgreSQL migrations made easy",
5
5
  "type": "module",
6
6
  "sideEffects": false,
@@ -72,6 +72,7 @@
72
72
  "format-and-lint": "biome check . --error-on-warnings",
73
73
  "knip": "knip",
74
74
  "pgdelta": "bun src/cli/bin/cli.ts",
75
+ "sync-base-images": "bun scripts/sync-supabase-base-images.ts",
75
76
  "test": "bun scripts/run-tests.ts",
76
77
  "test:unit": "bun run test src/",
77
78
  "test:integration": "bun run test tests/",