@prisma-next/target-postgres 0.3.0-dev.34 → 0.3.0-dev.36
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +5 -1
- package/dist/control.d.mts +16 -0
- package/dist/control.d.mts.map +1 -0
- package/dist/control.mjs +2453 -0
- package/dist/control.mjs.map +1 -0
- package/dist/descriptor-meta-DxB8oZzB.mjs +13 -0
- package/dist/descriptor-meta-DxB8oZzB.mjs.map +1 -0
- package/dist/pack.d.mts +7 -0
- package/dist/pack.d.mts.map +1 -0
- package/dist/pack.mjs +9 -0
- package/dist/pack.mjs.map +1 -0
- package/dist/runtime.d.mts +9 -0
- package/dist/runtime.d.mts.map +1 -0
- package/dist/runtime.mjs +21 -0
- package/dist/runtime.mjs.map +1 -0
- package/package.json +28 -28
- package/src/core/migrations/planner.ts +172 -22
- package/src/core/migrations/runner.ts +27 -20
- package/src/core/migrations/statement-builders.ts +6 -6
- package/src/core/types.ts +5 -0
- package/src/exports/control.ts +1 -3
- package/src/exports/runtime.ts +7 -12
- package/dist/chunk-RKEXRSSI.js +0 -14
- package/dist/chunk-RKEXRSSI.js.map +0 -1
- package/dist/core/descriptor-meta.d.ts +0 -9
- package/dist/core/descriptor-meta.d.ts.map +0 -1
- package/dist/core/migrations/planner.d.ts +0 -14
- package/dist/core/migrations/planner.d.ts.map +0 -1
- package/dist/core/migrations/runner.d.ts +0 -8
- package/dist/core/migrations/runner.d.ts.map +0 -1
- package/dist/core/migrations/statement-builders.d.ts +0 -30
- package/dist/core/migrations/statement-builders.d.ts.map +0 -1
- package/dist/exports/control.d.ts +0 -8
- package/dist/exports/control.d.ts.map +0 -1
- package/dist/exports/control.js +0 -1260
- package/dist/exports/control.js.map +0 -1
- package/dist/exports/pack.d.ts +0 -4
- package/dist/exports/pack.d.ts.map +0 -1
- package/dist/exports/pack.js +0 -11
- package/dist/exports/pack.js.map +0 -1
- package/dist/exports/runtime.d.ts +0 -12
- package/dist/exports/runtime.d.ts.map +0 -1
- package/dist/exports/runtime.js +0 -19
- package/dist/exports/runtime.js.map +0 -1
package/dist/control.mjs
ADDED
|
@@ -0,0 +1,2453 @@
|
|
|
1
|
+
import { t as postgresTargetDescriptorMeta } from "./descriptor-meta-DxB8oZzB.mjs";
|
|
2
|
+
import { SQL_CHAR_CODEC_ID, SQL_FLOAT_CODEC_ID, SQL_INT_CODEC_ID, SQL_VARCHAR_CODEC_ID } from "@prisma-next/sql-relational-core/ast";
|
|
3
|
+
import { arraysEqual, isIndexSatisfied, isUniqueConstraintSatisfied, verifySqlSchema } from "@prisma-next/family-sql/schema-verify";
|
|
4
|
+
import { ifDefined } from "@prisma-next/utils/defined";
|
|
5
|
+
import { createMigrationPlan, extractCodecControlHooks, plannerFailure, plannerSuccess, runnerFailure, runnerSuccess } from "@prisma-next/family-sql/control";
|
|
6
|
+
import { readMarker } from "@prisma-next/family-sql/verify";
|
|
7
|
+
import { SqlQueryError } from "@prisma-next/sql-errors";
|
|
8
|
+
import { ok, okVoid } from "@prisma-next/utils/result";
|
|
9
|
+
|
|
10
|
+
//#region ../../6-adapters/postgres/dist/codec-ids-Bsm9c7ns.mjs
// Canonical, versioned codec identifiers for Postgres storage types.
// The "@1" suffix versions each codec so wire formats can evolve without
// breaking existing contracts that reference the old id.
const PG_TEXT_CODEC_ID = "pg/text@1";
const PG_ENUM_CODEC_ID = "pg/enum@1";
const PG_CHAR_CODEC_ID = "pg/char@1";
const PG_VARCHAR_CODEC_ID = "pg/varchar@1";
const PG_INT_CODEC_ID = "pg/int@1";
const PG_INT2_CODEC_ID = "pg/int2@1";
const PG_INT4_CODEC_ID = "pg/int4@1";
const PG_INT8_CODEC_ID = "pg/int8@1";
const PG_FLOAT_CODEC_ID = "pg/float@1";
const PG_FLOAT4_CODEC_ID = "pg/float4@1";
const PG_FLOAT8_CODEC_ID = "pg/float8@1";
const PG_NUMERIC_CODEC_ID = "pg/numeric@1";
const PG_BOOL_CODEC_ID = "pg/bool@1";
const PG_BIT_CODEC_ID = "pg/bit@1";
const PG_VARBIT_CODEC_ID = "pg/varbit@1";
const PG_TIMESTAMP_CODEC_ID = "pg/timestamp@1";
const PG_TIMESTAMPTZ_CODEC_ID = "pg/timestamptz@1";
const PG_TIME_CODEC_ID = "pg/time@1";
const PG_TIMETZ_CODEC_ID = "pg/timetz@1";
const PG_INTERVAL_CODEC_ID = "pg/interval@1";
const PG_JSON_CODEC_ID = "pg/json@1";
const PG_JSONB_CODEC_ID = "pg/jsonb@1";
|
|
33
|
+
|
|
34
|
+
//#endregion
|
|
35
|
+
//#region ../../6-adapters/postgres/dist/descriptor-meta-D7pxo-wo.mjs
|
|
36
|
+
/**
|
|
37
|
+
* Shared SQL utility functions for the Postgres adapter.
|
|
38
|
+
*
|
|
39
|
+
* These functions handle safe SQL identifier and literal escaping
|
|
40
|
+
* with security validations to prevent injection and encoding issues.
|
|
41
|
+
*/
|
|
42
|
+
/**
 * Raised when a SQL identifier or literal fails validation.
 * Boundary layers translate this error into structured envelopes.
 *
 * @property {string} value - The offending (possibly sanitized) input
 * @property {string} kind - Either "identifier" or "literal"
 */
var SqlEscapeError = class extends Error {
	constructor(message, value, kind) {
		super(message);
		// Assignment order matches the original (value, kind, name) so
		// property enumeration order is preserved.
		Object.assign(this, { value, kind, name: "SqlEscapeError" });
	}
};
|
|
54
|
+
/**
 * Maximum byte length PostgreSQL accepts for an identifier (NAMEDATALEN - 1).
 */
const MAX_IDENTIFIER_LENGTH$1 = 63;
/**
 * Validates and double-quotes a PostgreSQL identifier (table, column, type,
 * or schema name).
 *
 * Security checks:
 * - empty identifiers are rejected
 * - null bytes are rejected (they can truncate names server-side)
 * - over-long identifiers produce a console warning (Postgres truncates them)
 *
 * @param {string} identifier - Raw identifier to quote
 * @returns {string} The identifier wrapped in double quotes, with embedded
 *   double quotes doubled
 * @throws {SqlEscapeError} If the identifier is empty or contains null bytes
 */
function quoteIdentifier(identifier) {
	if (identifier.length === 0) {
		throw new SqlEscapeError("Identifier cannot be empty", identifier, "identifier");
	}
	if (identifier.includes("\0")) {
		throw new SqlEscapeError("Identifier cannot contain null bytes", identifier.replace(/\0/g, "\\0"), "identifier");
	}
	if (identifier.length > MAX_IDENTIFIER_LENGTH$1) {
		console.warn(`Identifier "${identifier.slice(0, 20)}..." exceeds PostgreSQL's ${MAX_IDENTIFIER_LENGTH$1}-character limit and will be truncated`);
	}
	// Double every embedded double quote, then wrap the whole name.
	const doubled = identifier.replace(/"/g, '""');
	return `"${doubled}"`;
}
|
|
74
|
+
/**
 * Escapes a string literal for safe embedding in SQL statements by doubling
 * single quotes.
 *
 * Rejects null bytes, which could cause truncation or unexpected behavior.
 *
 * Note: assumes `standard_conforming_strings` is ON (the default since
 * PostgreSQL 9.1), so backslashes are literal characters and need no escaping.
 *
 * @param {string} value - Raw literal text
 * @returns {string} The text with every `'` doubled
 * @throws {SqlEscapeError} If the value contains null bytes
 */
function escapeLiteral(value) {
	if (value.includes("\0")) {
		throw new SqlEscapeError("Literal value cannot contain null bytes", value.replace(/\0/g, "\\0"), "literal");
	}
	return value.split("'").join("''");
}
|
|
89
|
+
/**
 * Builds a schema-qualified object name ("schema"."object") with both parts
 * safely quoted via quoteIdentifier.
 *
 * @param {string} schemaName - Schema portion of the name
 * @param {string} objectName - Object (table/type) portion of the name
 * @returns {string} Quoted, dot-joined qualified name
 */
function qualifyName(schemaName, objectName) {
	const parts = [quoteIdentifier(schemaName), quoteIdentifier(objectName)];
	return parts.join(".");
}
|
|
95
|
+
/**
 * Ensures an enum label fits PostgreSQL's NAMEDATALEN-1 (63 byte) limit.
 *
 * Unlike identifiers (which Postgres silently truncates), an over-long enum
 * label makes CREATE TYPE / ADD VALUE fail outright, so we reject it early
 * with a descriptive error.
 *
 * @param {string} value - Candidate enum label
 * @param {string} enumTypeName - Enum type name, used in the error message
 * @throws {SqlEscapeError} If the label exceeds the limit
 */
function validateEnumValueLength(value, enumTypeName) {
	if (value.length <= MAX_IDENTIFIER_LENGTH$1) return;
	throw new SqlEscapeError(`Enum value "${value.slice(0, 20)}..." for type "${enumTypeName}" exceeds PostgreSQL's ${MAX_IDENTIFIER_LENGTH$1}-character label limit`, value, "literal");
}
|
|
109
|
+
const ENUM_INTROSPECT_QUERY = `
|
|
110
|
+
SELECT
|
|
111
|
+
n.nspname AS schema_name,
|
|
112
|
+
t.typname AS type_name,
|
|
113
|
+
array_agg(e.enumlabel ORDER BY e.enumsortorder) AS values
|
|
114
|
+
FROM pg_type t
|
|
115
|
+
JOIN pg_namespace n ON t.typnamespace = n.oid
|
|
116
|
+
JOIN pg_enum e ON t.oid = e.enumtypid
|
|
117
|
+
WHERE n.nspname = $1
|
|
118
|
+
GROUP BY n.nspname, t.typname
|
|
119
|
+
ORDER BY n.nspname, t.typname
|
|
120
|
+
`;
|
|
121
|
+
/**
 * Type guard: true only for arrays whose every element is a string.
 * Used to validate introspected data at runtime.
 *
 * @param {unknown} value - Candidate value
 * @returns {boolean} Whether value is a (possibly empty) string array
 */
function isStringArray(value) {
	if (!Array.isArray(value)) return false;
	for (const entry of value) {
		if (typeof entry !== "string") return false;
	}
	return true;
}
|
|
127
|
+
/**
 * Normalizes a PostgreSQL `array_agg` result into a JS string array.
 *
 * Depending on configured type parsers, the `pg` driver may hand back either
 * a real JS array or the textual array literal form `{v1,v2,...}`. Already-
 * parsed string arrays pass through untouched; literal strings are split by
 * parseArrayElements (which understands Postgres element quoting).
 *
 * @param {unknown} value - Driver-provided value
 * @returns {string[] | null} Parsed values, or null when unrecognizable
 */
function parsePostgresArray(value) {
	if (isStringArray(value)) return value;
	if (typeof value !== "string") return null;
	if (!(value.startsWith("{") && value.endsWith("}"))) return null;
	const body = value.slice(1, -1);
	return body === "" ? [] : parseArrayElements(body);
}
|
|
150
|
+
/**
 * Splits the interior of a PostgreSQL array literal into its elements.
 *
 * Quoted elements (`"..."`) may contain commas; inside them `\"` and `\\`
 * decode to `"` and `\`. Unquoted elements run to the next comma and are
 * trimmed of surrounding whitespace.
 *
 * @param {string} input - Array literal body without the outer braces
 * @returns {string[]} Decoded elements
 */
function parseArrayElements(input) {
	const elements = [];
	const len = input.length;
	let pos = 0;
	while (pos < len) {
		const ch = input[pos];
		if (ch === ",") {
			// Separator between elements; nothing to emit.
			pos += 1;
		} else if (ch === '"') {
			// Quoted element: consume until the closing quote, honoring
			// backslash escapes.
			pos += 1;
			let piece = "";
			while (pos < len && input[pos] !== '"') {
				if (input[pos] === "\\" && pos + 1 < len) pos += 1;
				piece += input[pos];
				pos += 1;
			}
			pos += 1; // step past the closing quote
			elements.push(piece);
		} else {
			// Unquoted element: runs to the next comma (or end of input).
			const comma = input.indexOf(",", pos);
			const end = comma === -1 ? len : comma;
			elements.push(input.slice(pos, end).trim());
			pos = end;
		}
	}
	return elements;
}
|
|
183
|
+
/**
 * Reads the declared enum labels from a StorageTypeInstance.
 *
 * @param {{ typeParams?: Record<string, unknown> }} typeInstance
 * @returns {string[] | null} The `values` type param when it is a string
 *   array; null when absent or malformed
 */
function getEnumValues(typeInstance) {
	const values = typeInstance.typeParams?.["values"];
	if (!isStringArray(values)) return null;
	return values;
}
|
|
191
|
+
/**
 * Looks up the currently-deployed enum labels for a native type from the
 * schema IR's Postgres annotations.
 *
 * @param {object} schema - Schema IR with optional `annotations.pg.storageTypes`
 * @param {string} nativeType - Postgres type name to look up
 * @returns {string[] | null} Existing labels, or null when the type is not
 *   recorded or is not a pg/enum codec
 */
function readExistingEnumValues(schema, nativeType) {
	const pgAnnotations = schema.annotations?.["pg"];
	const instance = pgAnnotations?.["storageTypes"]?.[nativeType];
	if (!instance) return null;
	if (instance.codecId !== PG_ENUM_CODEC_ID) return null;
	return getEnumValues(instance);
}
|
|
200
|
+
/**
 * Classifies the change needed to move an enum from `existing` to `desired`.
 *
 * - `unchanged`: label lists are identical (including order)
 * - `add_values`: only appends are needed (Postgres supports in-place adds)
 * - `rebuild`: labels were removed and/or reordered — a full type rebuild
 *   (temp type + column migration) is required
 *
 * @param {string[]} existing - Labels currently in the database
 * @param {string[]} desired - Target labels from the contract
 * @returns {{kind: string, values?: string[], removedValues?: string[]}}
 */
function determineEnumDiff(existing, desired) {
	if (arraysEqual(existing, desired)) return { kind: "unchanged" };
	const have = new Set(existing);
	const want = new Set(desired);
	const missingValues = desired.filter((value) => !have.has(value));
	const removedValues = existing.filter((value) => !want.has(value));
	// Same membership but unequal lists (we already returned on equality)
	// means the order changed, which also forces a rebuild.
	const reorderedOnly = missingValues.length === 0 && removedValues.length === 0;
	if (removedValues.length > 0 || reorderedOnly) {
		return { kind: "rebuild", removedValues };
	}
	return { kind: "add_values", values: missingValues };
}
|
|
231
|
+
/**
 * Builds an EXISTS / NOT EXISTS probe for an enum type in pg_type.
 * Interpolated names are escaped with escapeLiteral before embedding.
 *
 * @param exists - When false, emits NOT EXISTS (precheck before CREATE TYPE)
 */
function enumTypeExistsCheck(schemaName, typeName, exists = true) {
	return `SELECT ${exists ? "EXISTS" : "NOT EXISTS"} (
SELECT 1
FROM pg_type t
JOIN pg_namespace n ON t.typnamespace = n.oid
WHERE n.nspname = '${escapeLiteral(schemaName)}'
AND t.typname = '${escapeLiteral(typeName)}'
)`;
}
|
|
240
|
+
/**
 * Builds the migration operation that creates a brand-new enum type.
 *
 * Pre/execute/postcheck phases: verify the type does not exist, CREATE TYPE
 * with all labels, then verify it does exist.
 *
 * @param typeName - Contract-level type name (used in operation ids/labels)
 * @param nativeType - PostgreSQL type name
 * @param schemaName - PostgreSQL schema
 * @param values - Ordered enum labels
 * @throws {SqlEscapeError} If any label exceeds the 63-character limit
 */
function buildCreateEnumOperation(typeName, nativeType, schemaName, values) {
	// Fail fast on over-long labels before emitting any SQL.
	for (const value of values) validateEnumValueLength(value, typeName);
	const literalValues = values.map((value) => `'${escapeLiteral(value)}'`).join(", ");
	const qualifiedType = qualifyName(schemaName, nativeType);
	return {
		id: `type.${typeName}`,
		label: `Create type ${typeName}`,
		summary: `Creates enum type ${typeName}`,
		operationClass: "additive",
		target: { id: "postgres" },
		precheck: [{
			description: `ensure type "${nativeType}" does not exist`,
			sql: enumTypeExistsCheck(schemaName, nativeType, false)
		}],
		execute: [{
			description: `create type "${nativeType}"`,
			sql: `CREATE TYPE ${qualifiedType} AS ENUM (${literalValues})`
		}],
		postcheck: [{
			description: `verify type "${nativeType}" exists`,
			sql: enumTypeExistsCheck(schemaName, nativeType)
		}]
	};
}
|
|
264
|
+
/**
 * Picks the best BEFORE/AFTER anchor for inserting one new enum value so the
 * final order matches the desired list.
 *
 * Preference order: the nearest preceding desired value that already exists
 * (AFTER), else the nearest following one (BEFORE), else append at the end.
 *
 * @param {{desired: string[], desiredIndex: number, current: string[]}} options
 * @returns {{clause: string, insertAt: number}} SQL position clause (possibly
 *   empty) and the index where the value lands in `current`
 */
function computeInsertPosition(options) {
	const { desired, desiredIndex, current } = options;
	const present = new Set(current);
	// Nearest already-present neighbor before the insertion point.
	let previous;
	for (let i = desiredIndex - 1; i >= 0; i -= 1) {
		if (present.has(desired[i])) {
			previous = desired[i];
			break;
		}
	}
	// Nearest already-present neighbor after the insertion point.
	let next;
	for (let i = desiredIndex + 1; i < desired.length; i += 1) {
		if (present.has(desired[i])) {
			next = desired[i];
			break;
		}
	}
	if (previous) {
		return { clause: ` AFTER '${escapeLiteral(previous)}'`, insertAt: current.indexOf(previous) + 1 };
	}
	if (next) {
		return { clause: ` BEFORE '${escapeLiteral(next)}'`, insertAt: current.indexOf(next) };
	}
	return { clause: "", insertAt: current.length };
}
|
|
289
|
+
/**
|
|
290
|
+
* Builds operations to add new enum values to an existing PostgreSQL enum type.
|
|
291
|
+
*
|
|
292
|
+
* Each new value is added with `ALTER TYPE ... ADD VALUE IF NOT EXISTS` for idempotency.
|
|
293
|
+
* Values are inserted in the correct order using BEFORE/AFTER positioning to match
|
|
294
|
+
* the desired final order.
|
|
295
|
+
*
|
|
296
|
+
* This is a safe, non-destructive operation - existing data is not affected.
|
|
297
|
+
*
|
|
298
|
+
* @param options.typeName - Contract-level type name (e.g., 'Role')
|
|
299
|
+
* @param options.nativeType - PostgreSQL type name (e.g., 'role')
|
|
300
|
+
* @param options.schemaName - PostgreSQL schema (e.g., 'public')
|
|
301
|
+
* @param options.desired - Target ordered list of all enum values
|
|
302
|
+
* @param options.existing - Current enum values in the database
|
|
303
|
+
* @returns Array of migration operations to add each missing value
|
|
304
|
+
*/
|
|
305
|
+
function buildAddValueOperations(options) {
	const { typeName, nativeType, schemaName } = options;
	// Working copy of the live label list; updated as each planned insert is
	// recorded so later inserts anchor against the simulated final order.
	const current = [...options.existing];
	const currentSet = new Set(current);
	const operations = [];
	for (let index = 0; index < options.desired.length; index += 1) {
		const value = options.desired[index];
		if (value === void 0) continue;
		// Already present — nothing to add for this label.
		if (currentSet.has(value)) continue;
		validateEnumValueLength(value, typeName);
		// Decide BEFORE/AFTER anchoring relative to values that exist so far.
		const { clause, insertAt } = computeInsertPosition({
			desired: options.desired,
			desiredIndex: index,
			current
		});
		operations.push({
			id: `type.${typeName}.value.${value}`,
			label: `Add value ${value} to ${typeName}`,
			summary: `Adds enum value ${value} to ${typeName}`,
			operationClass: "widening",
			target: { id: "postgres" },
			precheck: [],
			execute: [{
				description: `add value "${value}" if not exists`,
				// IF NOT EXISTS keeps the statement idempotent on re-run.
				sql: `ALTER TYPE ${qualifyName(schemaName, nativeType)} ADD VALUE IF NOT EXISTS '${escapeLiteral(value)}'${clause}`
			}],
			postcheck: []
		});
		// Simulate the insert so subsequent positions are computed correctly.
		current.splice(insertAt, 0, value);
		currentSet.add(value);
	}
	return operations;
}
|
|
338
|
+
/**
 * Finds contract columns that use the given enum type, matched either by
 * direct typeRef or by native type + pg/enum codec id.
 *
 * @param {object} contract - Contract with `storage.tables`
 * @param {string} typeName - Contract-level type name
 * @param {string} nativeType - PostgreSQL type name
 * @returns {{table: string, column: string}[]} Matching column references
 */
function collectEnumColumnsFromContract(contract, typeName, nativeType) {
	const refs = [];
	for (const [tableName, table] of Object.entries(contract.storage.tables)) {
		for (const [columnName, column] of Object.entries(table.columns)) {
			const usesEnum = column.typeRef === typeName || (column.nativeType === nativeType && column.codecId === PG_ENUM_CODEC_ID);
			if (usesEnum) {
				refs.push({ table: tableName, column: columnName });
			}
		}
	}
	return refs;
}
|
|
350
|
+
/**
 * Finds columns in the live schema IR whose native type matches the enum.
 *
 * Unlike the contract scan, this catches columns created outside the contract
 * (e.g. manual DDL) — essential for safe enum rebuilds.
 *
 * @param {object} schema - Schema IR with `tables`
 * @param {string} nativeType - PostgreSQL type name
 * @returns {{table: string, column: string}[]} Matching column references
 */
function collectEnumColumnsFromSchema(schema, nativeType) {
	const refs = [];
	for (const [tableName, table] of Object.entries(schema.tables)) {
		for (const [columnName, column] of Object.entries(table.columns)) {
			if (column.nativeType !== nativeType) continue;
			refs.push({ table: tableName, column: columnName });
		}
	}
	return refs;
}
|
|
363
|
+
/**
 * Merges enum-using columns discovered in the contract and in the live
 * database, deduplicated by table.column and sorted deterministically.
 *
 * Scanning both sources matters for data integrity: a column added via manual
 * DDL (absent from the contract) must still be migrated during a rebuild or
 * the trailing DROP TYPE would fail.
 *
 * @param {object} contract - Contract (desired state)
 * @param {object} schema - Schema IR (live state)
 * @param {string} typeName - Contract-level type name
 * @param {string} nativeType - PostgreSQL type name
 * @returns {{table: string, column: string}[]} Sorted, unique column refs
 */
function collectAllEnumColumns(contract, schema, typeName, nativeType) {
	const byKey = new Map();
	const discovered = [
		...collectEnumColumnsFromContract(contract, typeName, nativeType),
		...collectEnumColumnsFromSchema(schema, nativeType)
	];
	for (const ref of discovered) {
		const key = `${ref.table}.${ref.column}`;
		if (!byKey.has(key)) byKey.set(key, ref);
	}
	// Sort by table, then column, for stable plan output.
	return [...byKey.values()].sort(
		(a, b) => a.table.localeCompare(b.table) || a.column.localeCompare(b.column)
	);
}
|
|
388
|
+
/**
 * Builds a SQL check to verify a column's type matches an expected type.
 * Queries information_schema.columns; `udt_name` carries the underlying
 * (native) type name for user-defined types such as enums.
 */
function columnTypeCheck(options) {
	return `SELECT EXISTS (
SELECT 1
FROM information_schema.columns
WHERE table_schema = '${escapeLiteral(options.schemaName)}'
AND table_name = '${escapeLiteral(options.tableName)}'
AND column_name = '${escapeLiteral(options.columnName)}'
AND udt_name = '${escapeLiteral(options.expectedType)}'
)`;
}
|
|
401
|
+
/** PostgreSQL maximum identifier length (NAMEDATALEN - 1). Duplicate of
 * MAX_IDENTIFIER_LENGTH$1 above; kept because the two originate from
 * different pre-bundle modules. */
const MAX_IDENTIFIER_LENGTH = 63;
/** Suffix appended to an enum type's name for the temporary type created
 * during rebuild operations (dropped/renamed away on success). */
const REBUILD_SUFFIX = "__pn_rebuild";
|
|
405
|
+
/**
|
|
406
|
+
* Builds an SQL check to verify no rows contain any of the removed enum values.
|
|
407
|
+
* This prevents data loss during enum rebuild operations.
|
|
408
|
+
*
|
|
409
|
+
* @param schemaName - PostgreSQL schema name
|
|
410
|
+
* @param tableName - Table containing the enum column
|
|
411
|
+
* @param columnName - Column using the enum type
|
|
412
|
+
* @param removedValues - Array of enum values being removed
|
|
413
|
+
* @returns SQL query that returns true if no rows contain removed values
|
|
414
|
+
*/
|
|
415
|
+
function noRemovedValuesExistCheck(schemaName, tableName, columnName, removedValues) {
	// Nothing removed -> the check is trivially satisfied.
	if (removedValues.length === 0) return "SELECT true";
	const valuesList = removedValues.map((v) => `'${escapeLiteral(v)}'`).join(", ");
	// ::text cast lets the IN comparison work regardless of the enum type;
	// LIMIT 1 lets the EXISTS probe stop at the first offending row.
	return `SELECT NOT EXISTS (
SELECT 1 FROM ${qualifyName(schemaName, tableName)}
WHERE ${quoteIdentifier(columnName)}::text IN (${valuesList})
LIMIT 1
)`;
}
|
|
424
|
+
/**
|
|
425
|
+
* Builds a migration operation to recreate a PostgreSQL enum type with updated values.
|
|
426
|
+
*
|
|
427
|
+
* This is required when:
|
|
428
|
+
* - Enum values are removed (PostgreSQL doesn't support direct removal)
|
|
429
|
+
* - Enum values are reordered (PostgreSQL doesn't support reordering)
|
|
430
|
+
*
|
|
431
|
+
* The operation:
|
|
432
|
+
* 1. Creates a new enum type with the desired values (temp name)
|
|
433
|
+
* 2. Migrates all columns to use the new type via text cast
|
|
434
|
+
* 3. Drops the original type
|
|
435
|
+
* 4. Renames the temp type to the original name
|
|
436
|
+
*
|
|
437
|
+
* IMPORTANT: If values are being removed and data exists using those values,
|
|
438
|
+
* the operation will fail at the precheck stage with a clear error message.
|
|
439
|
+
* This prevents silent data loss.
|
|
440
|
+
*
|
|
441
|
+
* @param options.typeName - Contract-level type name
|
|
442
|
+
* @param options.nativeType - PostgreSQL type name
|
|
443
|
+
* @param options.schemaName - PostgreSQL schema
|
|
444
|
+
* @param options.values - Desired final enum values
|
|
445
|
+
* @param options.removedValues - Values being removed (for data loss checks)
|
|
446
|
+
* @param options.contract - Full contract for column discovery
|
|
447
|
+
* @param options.schema - Current schema IR for column discovery
|
|
448
|
+
* @returns Migration operation for full enum rebuild
|
|
449
|
+
*/
|
|
450
|
+
function buildRecreateEnumOperation(options) {
	const tempTypeName = `${options.nativeType}${REBUILD_SUFFIX}`;
	// The temp name must itself be a legal identifier; reject base names that
	// would push it past the 63-character limit. (12 = REBUILD_SUFFIX length)
	if (tempTypeName.length > MAX_IDENTIFIER_LENGTH) {
		const maxBaseLength = MAX_IDENTIFIER_LENGTH - 12;
		throw new Error(`Enum type name "${options.nativeType}" is too long for rebuild operation. Maximum length is ${maxBaseLength} characters (type name + "${REBUILD_SUFFIX}" suffix must fit within PostgreSQL's ${MAX_IDENTIFIER_LENGTH}-character identifier limit).`);
	}
	const qualifiedOriginal = qualifyName(options.schemaName, options.nativeType);
	const qualifiedTemp = qualifyName(options.schemaName, tempTypeName);
	const literalValues = options.values.map((value) => `'${escapeLiteral(value)}'`).join(", ");
	// Columns from BOTH contract and live schema must be migrated, or the
	// DROP TYPE below would fail on a lingering dependency.
	const columnRefs = collectAllEnumColumns(options.contract, options.schema, options.typeName, options.nativeType);
	// Re-point every dependent column at the temp type via a text round-trip.
	const alterColumns = columnRefs.map((ref) => ({
		description: `alter ${ref.table}.${ref.column} to ${tempTypeName}`,
		sql: `ALTER TABLE ${qualifyName(options.schemaName, ref.table)}
ALTER COLUMN ${quoteIdentifier(ref.column)}
TYPE ${qualifiedTemp}
USING ${quoteIdentifier(ref.column)}::text::${qualifiedTemp}`
	}));
	const postchecks = [
		{
			description: `verify type "${options.nativeType}" exists`,
			sql: enumTypeExistsCheck(options.schemaName, options.nativeType)
		},
		{
			description: `verify temp type "${tempTypeName}" was removed`,
			sql: enumTypeExistsCheck(options.schemaName, tempTypeName, false)
		},
		...columnRefs.map((ref) => ({
			description: `verify ${ref.table}.${ref.column} uses type "${options.nativeType}"`,
			sql: columnTypeCheck({
				schemaName: options.schemaName,
				tableName: ref.table,
				columnName: ref.column,
				expectedType: options.nativeType
			})
		}))
	];
	return {
		id: `type.${options.typeName}.rebuild`,
		label: `Rebuild type ${options.typeName}`,
		summary: `Recreates enum type ${options.typeName} with updated values`,
		operationClass: "destructive",
		target: { id: "postgres" },
		// When labels are being removed, block the rebuild if any row still
		// holds one of them — this is the data-loss guard.
		precheck: [{
			description: `ensure type "${options.nativeType}" exists`,
			sql: enumTypeExistsCheck(options.schemaName, options.nativeType)
		}, ...options.removedValues.length > 0 ? columnRefs.map((ref) => ({
			description: `ensure no rows in ${ref.table}.${ref.column} contain removed values (${options.removedValues.join(", ")})`,
			sql: noRemovedValuesExistCheck(options.schemaName, ref.table, ref.column, options.removedValues)
		})) : []],
		// Execution order matters: clean orphaned temp -> create temp ->
		// migrate columns -> drop original -> rename temp into place.
		execute: [
			{
				description: `drop orphaned temp type "${tempTypeName}" if exists`,
				sql: `DROP TYPE IF EXISTS ${qualifiedTemp}`
			},
			{
				description: `create temp type "${tempTypeName}"`,
				sql: `CREATE TYPE ${qualifiedTemp} AS ENUM (${literalValues})`
			},
			...alterColumns,
			{
				description: `drop type "${options.nativeType}"`,
				sql: `DROP TYPE ${qualifiedOriginal}`
			},
			{
				description: `rename type "${tempTypeName}" to "${options.nativeType}"`,
				sql: `ALTER TYPE ${qualifiedTemp} RENAME TO ${quoteIdentifier(options.nativeType)}`
			}
		],
		postcheck: postchecks
	};
}
|
|
521
|
+
/**
|
|
522
|
+
* Postgres enum hooks for planning, verifying, and introspecting `storage.types`.
|
|
523
|
+
*/
|
|
524
|
+
const pgEnumControlHooks = {
	// Plans DDL for one contract type: create when absent, append values when
	// the diff is purely additive, otherwise a full destructive rebuild.
	planTypeOperations: ({ typeName, typeInstance, contract, schema, schemaName }) => {
		const desired = getEnumValues(typeInstance);
		// Not an enum (or empty value list): nothing for this hook to plan.
		if (!desired || desired.length === 0) return { operations: [] };
		const schemaNamespace = schemaName ?? "public";
		const existing = readExistingEnumValues(schema, typeInstance.nativeType);
		if (!existing) return { operations: [buildCreateEnumOperation(typeName, typeInstance.nativeType, schemaNamespace, desired)] };
		const diff = determineEnumDiff(existing, desired);
		if (diff.kind === "unchanged") return { operations: [] };
		if (diff.kind === "rebuild") return { operations: [buildRecreateEnumOperation({
			typeName,
			nativeType: typeInstance.nativeType,
			schemaName: schemaNamespace,
			values: desired,
			removedValues: diff.removedValues,
			contract,
			schema
		})] };
		return { operations: buildAddValueOperations({
			typeName,
			nativeType: typeInstance.nativeType,
			schemaName: schemaNamespace,
			desired,
			existing
		}) };
	},
	// Compares contract labels to the schema IR and reports drift findings.
	// An empty array means the type verifies clean.
	verifyType: ({ typeName, typeInstance, schema }) => {
		const desired = getEnumValues(typeInstance);
		if (!desired) return [];
		const existing = readExistingEnumValues(schema, typeInstance.nativeType);
		if (!existing) return [{
			kind: "type_missing",
			table: "",
			typeName,
			message: `Type "${typeName}" is missing from database`
		}];
		// Order-sensitive comparison: reordered labels count as a mismatch.
		if (!arraysEqual(existing, desired)) return [{
			kind: "type_values_mismatch",
			table: "",
			typeName,
			expected: desired.join(", "),
			actual: existing.join(", "),
			message: `Type "${typeName}" values do not match contract`
		}];
		return [];
	},
	// Reads all enum types in the target schema from the live database and
	// shapes them as StorageTypeInstances keyed by native type name.
	introspectTypes: async ({ driver, schemaName }) => {
		const namespace = schemaName ?? "public";
		const result = await driver.query(ENUM_INTROSPECT_QUERY, [namespace]);
		const types = {};
		for (const row of result.rows) {
			// array_agg may arrive as a JS array or a "{...}" literal string.
			const values = parsePostgresArray(row.values);
			if (!values) throw new Error(`Failed to parse enum values for type "${row.type_name}": unexpected format: ${JSON.stringify(row.values)}`);
			types[row.type_name] = {
				codecId: PG_ENUM_CODEC_ID,
				nativeType: row.type_name,
				typeParams: { values }
			};
		}
		return types;
	}
};
|
|
586
|
+
// Recursion guard used by the schema-to-type renderer below.
const MAX_DEPTH = 32;
/**
 * Type guard: true for any non-null object (arrays included).
 *
 * @param {unknown} value - Candidate value
 * @returns {boolean}
 */
function isRecord(value) {
	return value !== null && typeof value === "object";
}
|
|
590
|
+
/**
 * Escapes a string for safe embedding in a single-quoted TS string literal:
 * backslash, single quote, newline, and carriage return are backslash-escaped.
 */
function escapeStringLiteral(str) {
	// Single pass over all escapable characters; replacements are never rescanned,
	// matching the original chained-replace semantics.
	const ESCAPES = {
		"\\": "\\\\",
		"'": "\\'",
		"\n": "\\n",
		"\r": "\\r"
	};
	return str.replace(/[\\'\n\r]/g, (ch) => ESCAPES[ch]);
}
|
|
593
|
+
/** Matches keys that are valid bare TS identifiers and need no quoting. */
const IDENTIFIER_KEY_PATTERN = /^[A-Za-z_$][A-Za-z0-9_$]*$/;
/**
 * Renders an object-type property key: bare when it is a valid identifier,
 * otherwise as an escaped single-quoted string.
 */
function quotePropertyKey(key) {
	if (IDENTIFIER_KEY_PATTERN.test(key)) return key;
	return `'${escapeStringLiteral(key)}'`;
}
|
|
596
|
+
/**
 * Renders a JSON Schema `const`/`enum` member as a TS literal type.
 * Strings are quoted and escaped; numbers/booleans stringified; null stays
 * `null`; anything else (objects, arrays, undefined) degrades to `unknown`.
 */
function renderLiteral(value) {
	if (value === null) return "null";
	switch (typeof value) {
		case "string":
			return `'${escapeStringLiteral(value)}'`;
		case "number":
		case "boolean":
			return String(value);
		default:
			return "unknown";
	}
}
|
|
602
|
+
/** Renders a list of sub-schemas as a TS union (`A | B | ...`). */
function renderUnion(items, depth) {
	const parts = [];
	for (const item of items) parts.push(render(item, depth));
	return parts.join(" | ");
}
|
|
605
|
+
/**
 * Renders a JSON Schema object type as a TS type.
 * With no declared properties, it becomes a Record keyed by string whose
 * value type comes from `additionalProperties` (or `unknown` when open);
 * otherwise an inline object literal with keys sorted for stable output and
 * `?` markers on keys absent from `required`.
 */
function renderObjectType(schema, depth) {
	const properties = isRecord(schema["properties"]) ? schema["properties"] : {};
	const requiredKeys = new Set(
		Array.isArray(schema["required"]) ? schema["required"].filter((key) => typeof key === "string") : []
	);
	const keys = Object.keys(properties).sort((a, b) => a.localeCompare(b));
	if (keys.length === 0) {
		const additional = schema["additionalProperties"];
		// Open objects (true or unspecified) are fully unconstrained records.
		if (additional === void 0 || additional === true) return "Record<string, unknown>";
		return `Record<string, ${render(additional, depth)}>`;
	}
	const members = keys.map((key) => {
		const optionalMarker = requiredKeys.has(key) ? "" : "?";
		return `${quotePropertyKey(key)}${optionalMarker}: ${render(properties[key], depth)}`;
	});
	return `{ ${members.join("; ")} }`;
}
|
|
620
|
+
/**
 * Renders a JSON Schema array type.
 * Tuple form (`items` is an array) becomes `readonly [...]`; a single item
 * schema becomes `T[]`, parenthesized when T is itself a union/intersection;
 * no `items` at all yields `unknown[]`.
 */
function renderArrayType(schema, depth) {
	const items = schema["items"];
	if (Array.isArray(items)) {
		const tuple = items.map((item) => render(item, depth)).join(", ");
		return `readonly [${tuple}]`;
	}
	if (items === void 0) return "unknown[]";
	const itemType = render(items, depth);
	// Unions/intersections need parentheses before the [] suffix.
	const needsParens = itemType.includes(" | ") || itemType.includes(" & ");
	return needsParens ? `(${itemType})[]` : `${itemType}[]`;
}
|
|
628
|
+
/**
 * Core recursive JSON Schema -> TS type renderer.
 * Checks combinators in priority order (const, enum, oneOf, anyOf, allOf,
 * multi-type arrays) before dispatching on the scalar `type` keyword.
 * Anything unrecognized, or nesting beyond MAX_DEPTH, falls back to JsonValue.
 */
function render(schema, depth) {
	if (depth > MAX_DEPTH || !isRecord(schema)) return "JsonValue";
	const nextDepth = depth + 1;
	if ("const" in schema) return renderLiteral(schema["const"]);
	const enumValues = schema["enum"];
	if (Array.isArray(enumValues)) return enumValues.map((value) => renderLiteral(value)).join(" | ");
	if (Array.isArray(schema["oneOf"])) return renderUnion(schema["oneOf"], nextDepth);
	if (Array.isArray(schema["anyOf"])) return renderUnion(schema["anyOf"], nextDepth);
	if (Array.isArray(schema["allOf"])) return schema["allOf"].map((item) => render(item, nextDepth)).join(" & ");
	const type = schema["type"];
	// `type: ["string", "null"]` expands to a union of single-type renders.
	if (Array.isArray(type)) return type.map((single) => render({ ...schema, type: single }, nextDepth)).join(" | ");
	if (type === "string") return "string";
	if (type === "number" || type === "integer") return "number";
	if (type === "boolean") return "boolean";
	if (type === "null") return "null";
	if (type === "array") return renderArrayType(schema, nextDepth);
	if (type === "object") return renderObjectType(schema, nextDepth);
	return "JsonValue";
}
|
|
652
|
+
/**
 * Renders a TypeScript type expression from a JSON Schema value.
 * Entry point for the recursive renderer, starting at depth 0; invalid or
 * overly deep schemas fall back to the "JsonValue" type.
 */
function renderTypeScriptTypeFromJsonSchema(schema) {
	return render(schema, 0);
}
|
|
655
|
+
/**
|
|
656
|
+
* Shared utility for expanding parameterized Postgres types to their full SQL representation.
|
|
657
|
+
*
|
|
658
|
+
* This module provides a single source of truth for type expansion logic, used by:
|
|
659
|
+
* - Schema verification (verify-sql-schema.ts) via the expandNativeType codec control hook
|
|
660
|
+
* - Migration planner (planner.ts) via direct import
|
|
661
|
+
*
|
|
662
|
+
* @module
|
|
663
|
+
*/
|
|
664
|
+
/**
 * Codec IDs whose native SQL type takes a single 'length' parameter,
 * e.g. character varying(255) or bit(8).
 */
const LENGTH_CODEC_IDS = new Set([
	SQL_CHAR_CODEC_ID,
	SQL_VARCHAR_CODEC_ID,
	PG_CHAR_CODEC_ID,
	PG_VARCHAR_CODEC_ID,
	PG_BIT_CODEC_ID,
	PG_VARBIT_CODEC_ID
]);
/**
 * Codec IDs whose native SQL type takes a 'precision' parameter,
 * e.g. timestamp(3) or interval(6).
 */
const TEMPORAL_PRECISION_CODEC_IDS = new Set([
	PG_TIMESTAMP_CODEC_ID,
	PG_TIMESTAMPTZ_CODEC_ID,
	PG_TIME_CODEC_ID,
	PG_TIMETZ_CODEC_ID,
	PG_INTERVAL_CODEC_ID
]);
|
|
681
|
+
/**
|
|
682
|
+
* Validates that a value is a valid type parameter number.
|
|
683
|
+
* Type parameters must be finite, non-negative integers.
|
|
684
|
+
*/
|
|
685
|
+
/**
 * True when `value` is usable as a SQL type parameter:
 * a finite, non-negative integer (rejects NaN, Infinity, floats, negatives,
 * and anything that is not a number at all).
 */
function isValidTypeParamNumber(value) {
	if (typeof value !== "number") return false;
	return Number.isFinite(value) && Number.isInteger(value) && value >= 0;
}
|
|
688
|
+
/**
 * Expands a parameterized native type to its full SQL representation.
 *
 * For example:
 * - { nativeType: 'character varying', typeParams: { length: 255 } } -> 'character varying(255)'
 * - { nativeType: 'numeric', typeParams: { precision: 10, scale: 2 } } -> 'numeric(10,2)'
 * - { nativeType: 'timestamp without time zone', typeParams: { precision: 3 } } -> 'timestamp without time zone(3)'
 *
 * Returns the original nativeType unchanged when typeParams or codecId is
 * missing, the codec is not a known parameterized type, or the parameter
 * values fail validation.
 */
function expandParameterizedNativeType(input) {
	const { nativeType, codecId, typeParams } = input;
	if (!codecId || !typeParams) return nativeType;
	// Length-parameterized types: char/varchar/bit/varbit.
	if (LENGTH_CODEC_IDS.has(codecId)) {
		const length = typeParams["length"];
		return isValidTypeParamNumber(length) ? `${nativeType}(${length})` : nativeType;
	}
	// numeric takes precision with optional scale; scale without precision is ignored.
	if (codecId === PG_NUMERIC_CODEC_ID) {
		const precision = typeParams["precision"];
		if (!isValidTypeParamNumber(precision)) return nativeType;
		const scale = typeParams["scale"];
		return isValidTypeParamNumber(scale)
			? `${nativeType}(${precision},${scale})`
			: `${nativeType}(${precision})`;
	}
	// Temporal types take a fractional-seconds precision.
	if (TEMPORAL_PRECISION_CODEC_IDS.has(codecId)) {
		const precision = typeParams["precision"];
		return isValidTypeParamNumber(precision) ? `${nativeType}(${precision})` : nativeType;
	}
	return nativeType;
}
|
|
726
|
+
/**
 * Creates a type import spec for a codec type, importing `named` from the
 * adapter's codec-types entry point under the same alias.
 */
const codecTypeImport = (named) => {
	return {
		package: "@prisma-next/adapter-postgres/codec-types",
		named,
		alias: named
	};
};
|
|
732
|
+
/**
 * Creates a precision-based TypeScript type renderer for temporal types:
 * renders `TypeName<precision>` when a numeric precision param is present,
 * plain `TypeName` otherwise.
 */
const precisionRenderer = (typeName) => ({
	kind: "function",
	render(params) {
		const precision = params["precision"];
		if (typeof precision !== "number") return typeName;
		return `${typeName}<${precision}>`;
	}
});
|
|
740
|
+
/**
 * Shared control hooks carrying only expandNativeType, reused by every
 * length/precision-parameterized codec in controlPlaneHooks below.
 */
const parameterizedTypeHooks = { expandNativeType: expandParameterizedNativeType };
|
|
742
|
+
/** Patterns that could smuggle executable or declaration code into a .d.ts. */
const UNSAFE_TYPE_EXPRESSION_PATTERN = /import\s*\(|require\s*\(|declare\s|export\s|eval\s*\(/;
/**
 * Validates that a type expression string is safe to embed in generated
 * .d.ts files: rejects import()/require()/eval() calls and declare/export
 * keywords.
 */
function isSafeTypeExpression(expr) {
	return !UNSAFE_TYPE_EXPRESSION_PATTERN.test(expr);
}
|
|
749
|
+
/**
 * Renders the TS type for a json/jsonb column.
 * An explicit non-empty `type` string wins; otherwise a `schemaJson` object
 * is rendered from JSON Schema. Either result is rejected in favor of
 * JsonValue when it fails the safety check; with neither param, JsonValue.
 */
function renderJsonTypeExpression(params) {
	const explicitType = params["type"];
	if (typeof explicitType === "string") {
		const trimmed = explicitType.trim();
		if (trimmed.length > 0) {
			return isSafeTypeExpression(trimmed) ? trimmed : "JsonValue";
		}
	}
	const schemaJson = params["schemaJson"];
	if (schemaJson && typeof schemaJson === "object") {
		const rendered = renderTypeScriptTypeFromJsonSchema(schemaJson);
		return isSafeTypeExpression(rendered) ? rendered : "JsonValue";
	}
	return "JsonValue";
}
|
|
764
|
+
/**
 * Static descriptor metadata for the Postgres adapter.
 *
 * Declares the adapter's identity (family/target ids, version), its query
 * capabilities, the TypeScript type-rendering configuration for
 * parameterized codecs, the control-plane hooks wired per codec, and the
 * codec -> native storage type mapping.
 */
const postgresAdapterDescriptorMeta = {
	kind: "adapter",
	familyId: "sql",
	targetId: "postgres",
	id: "postgres",
	version: "0.0.1",
	// Feature flags describing what this target supports.
	capabilities: {
		postgres: {
			orderBy: true,
			limit: true,
			lateral: true,
			jsonAgg: true,
			returning: true
		},
		sql: { enums: true }
	},
	types: {
		codecTypes: {
			// Barrel import exposing all codec TS types under the PgTypes alias.
			import: {
				package: "@prisma-next/adapter-postgres/codec-types",
				named: "CodecTypes",
				alias: "PgTypes"
			},
			// Per-codec TypeScript type renderers. String entries are templates
			// with {{param}} placeholders; object entries render via a function.
			parameterized: {
				[SQL_CHAR_CODEC_ID]: "Char<{{length}}>",
				[SQL_VARCHAR_CODEC_ID]: "Varchar<{{length}}>",
				[PG_CHAR_CODEC_ID]: "Char<{{length}}>",
				[PG_VARCHAR_CODEC_ID]: "Varchar<{{length}}>",
				[PG_NUMERIC_CODEC_ID]: {
					kind: "function",
					render: (params) => {
						const precision = params["precision"];
						// precision is mandatory for numeric; scale is optional.
						if (typeof precision !== "number") throw new Error("pg/numeric@1 renderer expects precision");
						const scale = params["scale"];
						return typeof scale === "number" ? `Numeric<${precision}, ${scale}>` : `Numeric<${precision}>`;
					}
				},
				[PG_BIT_CODEC_ID]: "Bit<{{length}}>",
				[PG_VARBIT_CODEC_ID]: "VarBit<{{length}}>",
				[PG_TIMESTAMP_CODEC_ID]: precisionRenderer("Timestamp"),
				[PG_TIMESTAMPTZ_CODEC_ID]: precisionRenderer("Timestamptz"),
				[PG_TIME_CODEC_ID]: precisionRenderer("Time"),
				[PG_TIMETZ_CODEC_ID]: precisionRenderer("Timetz"),
				[PG_INTERVAL_CODEC_ID]: precisionRenderer("Interval"),
				[PG_ENUM_CODEC_ID]: {
					kind: "function",
					render: (params) => {
						const values = params["values"];
						if (!Array.isArray(values)) throw new Error("pg/enum@1 renderer expects values array");
						// Render the enum as a union of single-quoted string literals.
						return values.map((value) => `'${String(value).replace(/'/g, "\\'")}'`).join(" | ");
					}
				},
				[PG_JSON_CODEC_ID]: {
					kind: "function",
					render: renderJsonTypeExpression
				},
				[PG_JSONB_CODEC_ID]: {
					kind: "function",
					render: renderJsonTypeExpression
				}
			},
			// Named types generated .d.ts files must import from codec-types.
			typeImports: [
				{
					package: "@prisma-next/adapter-postgres/codec-types",
					named: "JsonValue",
					alias: "JsonValue"
				},
				codecTypeImport("Char"),
				codecTypeImport("Varchar"),
				codecTypeImport("Numeric"),
				codecTypeImport("Bit"),
				codecTypeImport("VarBit"),
				codecTypeImport("Timestamp"),
				codecTypeImport("Timestamptz"),
				codecTypeImport("Time"),
				codecTypeImport("Timetz"),
				codecTypeImport("Interval")
			],
			// Control-plane hooks per codec: parameterized types share the
			// expandNativeType-only hooks; enums get the full enum hook set.
			controlPlaneHooks: {
				[SQL_CHAR_CODEC_ID]: parameterizedTypeHooks,
				[SQL_VARCHAR_CODEC_ID]: parameterizedTypeHooks,
				[PG_CHAR_CODEC_ID]: parameterizedTypeHooks,
				[PG_VARCHAR_CODEC_ID]: parameterizedTypeHooks,
				[PG_NUMERIC_CODEC_ID]: parameterizedTypeHooks,
				[PG_BIT_CODEC_ID]: parameterizedTypeHooks,
				[PG_VARBIT_CODEC_ID]: parameterizedTypeHooks,
				[PG_TIMESTAMP_CODEC_ID]: parameterizedTypeHooks,
				[PG_TIMESTAMPTZ_CODEC_ID]: parameterizedTypeHooks,
				[PG_TIME_CODEC_ID]: parameterizedTypeHooks,
				[PG_TIMETZ_CODEC_ID]: parameterizedTypeHooks,
				[PG_INTERVAL_CODEC_ID]: parameterizedTypeHooks,
				[PG_ENUM_CODEC_ID]: pgEnumControlHooks
			}
		},
		// Codec -> base native Postgres type mapping. Parameters (length,
		// precision, scale) are applied separately by expandParameterizedNativeType.
		storage: [
			{ typeId: PG_TEXT_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "text" },
			{ typeId: SQL_CHAR_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "character" },
			{ typeId: SQL_VARCHAR_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "character varying" },
			{ typeId: SQL_INT_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "int4" },
			{ typeId: SQL_FLOAT_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "float8" },
			{ typeId: PG_CHAR_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "character" },
			{ typeId: PG_VARCHAR_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "character varying" },
			{ typeId: PG_INT_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "int4" },
			{ typeId: PG_FLOAT_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "float8" },
			{ typeId: PG_INT4_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "int4" },
			{ typeId: PG_INT2_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "int2" },
			{ typeId: PG_INT8_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "int8" },
			{ typeId: PG_FLOAT4_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "float4" },
			{ typeId: PG_FLOAT8_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "float8" },
			{ typeId: PG_NUMERIC_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "numeric" },
			{ typeId: PG_TIMESTAMP_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "timestamp" },
			{ typeId: PG_TIMESTAMPTZ_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "timestamptz" },
			{ typeId: PG_TIME_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "time" },
			{ typeId: PG_TIMETZ_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "timetz" },
			{ typeId: PG_BOOL_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "bool" },
			{ typeId: PG_BIT_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "bit" },
			{ typeId: PG_VARBIT_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "bit varying" },
			{ typeId: PG_INTERVAL_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "interval" },
			{ typeId: PG_JSON_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "json" },
			{ typeId: PG_JSONB_CODEC_ID, familyId: "sql", targetId: "postgres", nativeType: "jsonb" }
		]
	}
};
|
|
1012
|
+
|
|
1013
|
+
//#endregion
|
|
1014
|
+
//#region ../../6-adapters/postgres/dist/control.mjs
|
|
1015
|
+
/**
|
|
1016
|
+
* Pre-compiled regex patterns for performance.
|
|
1017
|
+
* These are compiled once at module load time rather than on each function call.
|
|
1018
|
+
*/
|
|
1019
|
+
/** Matches sequence-backed defaults, e.g. nextval('users_id_seq'::regclass). */
const NEXTVAL_PATTERN = /^nextval\s*\(/i;
/** Matches "current time" defaults: now(), CURRENT_TIMESTAMP, clock_timestamp(). */
const TIMESTAMP_PATTERN = /^(now\s*\(\s*\)|CURRENT_TIMESTAMP|clock_timestamp\s*\(\s*\))$/i;
/** Matches pgcrypto's gen_random_uuid(). */
const UUID_PATTERN = /^gen_random_uuid\s*\(\s*\)$/i;
/** Matches uuid-ossp's uuid_generate_v4(); normalized to gen_random_uuid() by the parser. */
const UUID_OSSP_PATTERN = /^uuid_generate_v4\s*\(\s*\)$/i;
/** Matches a boolean true literal, case-insensitively. */
const TRUE_PATTERN = /^true$/i;
/** Matches a boolean false literal, case-insensitively. */
const FALSE_PATTERN = /^false$/i;
/** Matches integer and decimal numeric literals, optionally negative. */
const NUMERIC_PATTERN = /^-?\d+(\.\d+)?$/;
/** Matches a single-quoted string literal ('' = escaped quote) with optional ::type cast suffix. */
const STRING_LITERAL_PATTERN = /^'((?:[^']|'')*)'(?:::(?:"[^"]+"|[\w\s]+)(?:\(\d+\))?)?$/;
|
|
1027
|
+
/**
|
|
1028
|
+
* Parses a raw Postgres column default expression into a normalized ColumnDefault.
|
|
1029
|
+
* This enables semantic comparison between contract defaults and introspected schema defaults.
|
|
1030
|
+
*
|
|
1031
|
+
* Used by the migration diff layer to normalize raw database defaults during comparison,
|
|
1032
|
+
* keeping the introspection layer focused on faithful data capture.
|
|
1033
|
+
*
|
|
1034
|
+
* @param rawDefault - Raw default expression from information_schema.columns.column_default
|
|
1035
|
+
* @param _nativeType - Native column type (currently unused, reserved for future type-aware parsing)
|
|
1036
|
+
* @returns Normalized ColumnDefault or undefined if the expression cannot be parsed
|
|
1037
|
+
*/
|
|
1038
|
+
function parsePostgresDefault(rawDefault, _nativeType) {
	const expr = rawDefault.trim();
	// Well-known generator functions, normalized to canonical contract spellings
	// (both UUID generators collapse to gen_random_uuid()).
	const functionForms = [
		[NEXTVAL_PATTERN, "autoincrement()"],
		[TIMESTAMP_PATTERN, "now()"],
		[UUID_PATTERN, "gen_random_uuid()"],
		[UUID_OSSP_PATTERN, "gen_random_uuid()"]
	];
	for (const [pattern, expression] of functionForms) {
		if (pattern.test(expr)) return { kind: "function", expression };
	}
	// Boolean literals are lowercased; numeric literals pass through as-is.
	if (TRUE_PATTERN.test(expr)) return { kind: "literal", expression: "true" };
	if (FALSE_PATTERN.test(expr)) return { kind: "literal", expression: "false" };
	if (NUMERIC_PATTERN.test(expr)) return { kind: "literal", expression: expr };
	// Quoted string literal; any ::type cast suffix is stripped by the capture.
	const stringMatch = expr.match(STRING_LITERAL_PATTERN);
	if (stringMatch?.[1] !== void 0) return { kind: "literal", expression: `'${stringMatch[1]}'` };
	// Anything unrecognized is treated as an opaque function expression.
	return { kind: "function", expression: expr };
}
|
|
1078
|
+
/**
|
|
1079
|
+
* Postgres control plane adapter for control-plane operations like introspection.
|
|
1080
|
+
* Provides target-specific implementations for control-plane domain actions.
|
|
1081
|
+
*/
|
|
1082
|
+
var PostgresControlAdapter = class {
|
|
1083
|
+
familyId = "sql";
|
|
1084
|
+
targetId = "postgres";
|
|
1085
|
+
/**
|
|
1086
|
+
* @deprecated Use targetId instead
|
|
1087
|
+
*/
|
|
1088
|
+
target = "postgres";
|
|
1089
|
+
/**
|
|
1090
|
+
* Target-specific normalizer for raw Postgres default expressions.
|
|
1091
|
+
* Used by schema verification to normalize raw defaults before comparison.
|
|
1092
|
+
*/
|
|
1093
|
+
normalizeDefault = parsePostgresDefault;
|
|
1094
|
+
/**
|
|
1095
|
+
* Target-specific normalizer for Postgres schema native type names.
|
|
1096
|
+
* Used by schema verification to normalize introspected type names
|
|
1097
|
+
* before comparison with contract native types.
|
|
1098
|
+
*/
|
|
1099
|
+
normalizeNativeType = normalizeSchemaNativeType;
|
|
1100
|
+
/**
|
|
1101
|
+
* Introspects a Postgres database schema and returns a raw SqlSchemaIR.
|
|
1102
|
+
*
|
|
1103
|
+
* This is a pure schema discovery operation that queries the Postgres catalog
|
|
1104
|
+
* and returns the schema structure without type mapping or contract enrichment.
|
|
1105
|
+
* Type mapping and enrichment are handled separately by enrichment helpers.
|
|
1106
|
+
*
|
|
1107
|
+
* Uses batched queries to minimize database round trips (7 queries instead of 5T+3).
|
|
1108
|
+
*
|
|
1109
|
+
* @param driver - ControlDriverInstance<'sql', 'postgres'> instance for executing queries
|
|
1110
|
+
* @param contractIR - Optional contract IR for contract-guided introspection (filtering, optimization)
|
|
1111
|
+
* @param schema - Schema name to introspect (defaults to 'public')
|
|
1112
|
+
* @returns Promise resolving to SqlSchemaIR representing the live database schema
|
|
1113
|
+
*/
|
|
1114
|
+
async introspect(driver, _contractIR, schema = "public") {
|
|
1115
|
+
const [tablesResult, columnsResult, pkResult, fkResult, uniqueResult, indexResult, extensionsResult] = await Promise.all([
|
|
1116
|
+
driver.query(`SELECT table_name
|
|
1117
|
+
FROM information_schema.tables
|
|
1118
|
+
WHERE table_schema = $1
|
|
1119
|
+
AND table_type = 'BASE TABLE'
|
|
1120
|
+
ORDER BY table_name`, [schema]),
|
|
1121
|
+
driver.query(`SELECT
|
|
1122
|
+
c.table_name,
|
|
1123
|
+
column_name,
|
|
1124
|
+
data_type,
|
|
1125
|
+
udt_name,
|
|
1126
|
+
is_nullable,
|
|
1127
|
+
character_maximum_length,
|
|
1128
|
+
numeric_precision,
|
|
1129
|
+
numeric_scale,
|
|
1130
|
+
column_default,
|
|
1131
|
+
format_type(a.atttypid, a.atttypmod) AS formatted_type
|
|
1132
|
+
FROM information_schema.columns c
|
|
1133
|
+
JOIN pg_catalog.pg_class cl
|
|
1134
|
+
ON cl.relname = c.table_name
|
|
1135
|
+
JOIN pg_catalog.pg_namespace ns
|
|
1136
|
+
ON ns.nspname = c.table_schema
|
|
1137
|
+
AND ns.oid = cl.relnamespace
|
|
1138
|
+
JOIN pg_catalog.pg_attribute a
|
|
1139
|
+
ON a.attrelid = cl.oid
|
|
1140
|
+
AND a.attname = c.column_name
|
|
1141
|
+
AND a.attnum > 0
|
|
1142
|
+
AND NOT a.attisdropped
|
|
1143
|
+
WHERE c.table_schema = $1
|
|
1144
|
+
ORDER BY c.table_name, c.ordinal_position`, [schema]),
|
|
1145
|
+
driver.query(`SELECT
|
|
1146
|
+
tc.table_name,
|
|
1147
|
+
tc.constraint_name,
|
|
1148
|
+
kcu.column_name,
|
|
1149
|
+
kcu.ordinal_position
|
|
1150
|
+
FROM information_schema.table_constraints tc
|
|
1151
|
+
JOIN information_schema.key_column_usage kcu
|
|
1152
|
+
ON tc.constraint_name = kcu.constraint_name
|
|
1153
|
+
AND tc.table_schema = kcu.table_schema
|
|
1154
|
+
AND tc.table_name = kcu.table_name
|
|
1155
|
+
WHERE tc.table_schema = $1
|
|
1156
|
+
AND tc.constraint_type = 'PRIMARY KEY'
|
|
1157
|
+
ORDER BY tc.table_name, kcu.ordinal_position`, [schema]),
|
|
1158
|
+
driver.query(`SELECT
|
|
1159
|
+
tc.table_name,
|
|
1160
|
+
tc.constraint_name,
|
|
1161
|
+
kcu.column_name,
|
|
1162
|
+
kcu.ordinal_position,
|
|
1163
|
+
ccu.table_schema AS referenced_table_schema,
|
|
1164
|
+
ccu.table_name AS referenced_table_name,
|
|
1165
|
+
ccu.column_name AS referenced_column_name
|
|
1166
|
+
FROM information_schema.table_constraints tc
|
|
1167
|
+
JOIN information_schema.key_column_usage kcu
|
|
1168
|
+
ON tc.constraint_name = kcu.constraint_name
|
|
1169
|
+
AND tc.table_schema = kcu.table_schema
|
|
1170
|
+
AND tc.table_name = kcu.table_name
|
|
1171
|
+
JOIN information_schema.constraint_column_usage ccu
|
|
1172
|
+
ON ccu.constraint_name = tc.constraint_name
|
|
1173
|
+
AND ccu.table_schema = tc.table_schema
|
|
1174
|
+
WHERE tc.table_schema = $1
|
|
1175
|
+
AND tc.constraint_type = 'FOREIGN KEY'
|
|
1176
|
+
ORDER BY tc.table_name, tc.constraint_name, kcu.ordinal_position`, [schema]),
|
|
1177
|
+
driver.query(`SELECT
|
|
1178
|
+
tc.table_name,
|
|
1179
|
+
tc.constraint_name,
|
|
1180
|
+
kcu.column_name,
|
|
1181
|
+
kcu.ordinal_position
|
|
1182
|
+
FROM information_schema.table_constraints tc
|
|
1183
|
+
JOIN information_schema.key_column_usage kcu
|
|
1184
|
+
ON tc.constraint_name = kcu.constraint_name
|
|
1185
|
+
AND tc.table_schema = kcu.table_schema
|
|
1186
|
+
AND tc.table_name = kcu.table_name
|
|
1187
|
+
WHERE tc.table_schema = $1
|
|
1188
|
+
AND tc.constraint_type = 'UNIQUE'
|
|
1189
|
+
ORDER BY tc.table_name, tc.constraint_name, kcu.ordinal_position`, [schema]),
|
|
1190
|
+
driver.query(`SELECT
|
|
1191
|
+
i.tablename,
|
|
1192
|
+
i.indexname,
|
|
1193
|
+
ix.indisunique,
|
|
1194
|
+
a.attname,
|
|
1195
|
+
a.attnum
|
|
1196
|
+
FROM pg_indexes i
|
|
1197
|
+
JOIN pg_class ic ON ic.relname = i.indexname
|
|
1198
|
+
JOIN pg_namespace ins ON ins.oid = ic.relnamespace AND ins.nspname = $1
|
|
1199
|
+
JOIN pg_index ix ON ix.indexrelid = ic.oid
|
|
1200
|
+
JOIN pg_class t ON t.oid = ix.indrelid
|
|
1201
|
+
JOIN pg_namespace tn ON tn.oid = t.relnamespace AND tn.nspname = $1
|
|
1202
|
+
LEFT JOIN pg_attribute a ON a.attrelid = t.oid AND a.attnum = ANY(ix.indkey) AND a.attnum > 0
|
|
1203
|
+
WHERE i.schemaname = $1
|
|
1204
|
+
AND NOT EXISTS (
|
|
1205
|
+
SELECT 1
|
|
1206
|
+
FROM information_schema.table_constraints tc
|
|
1207
|
+
WHERE tc.table_schema = $1
|
|
1208
|
+
AND tc.table_name = i.tablename
|
|
1209
|
+
AND tc.constraint_name = i.indexname
|
|
1210
|
+
)
|
|
1211
|
+
ORDER BY i.tablename, i.indexname, a.attnum`, [schema]),
|
|
1212
|
+
driver.query(`SELECT extname
|
|
1213
|
+
FROM pg_extension
|
|
1214
|
+
ORDER BY extname`, [])
|
|
1215
|
+
]);
|
|
1216
|
+
const columnsByTable = groupBy(columnsResult.rows, "table_name");
|
|
1217
|
+
const pksByTable = groupBy(pkResult.rows, "table_name");
|
|
1218
|
+
const fksByTable = groupBy(fkResult.rows, "table_name");
|
|
1219
|
+
const uniquesByTable = groupBy(uniqueResult.rows, "table_name");
|
|
1220
|
+
const indexesByTable = groupBy(indexResult.rows, "tablename");
|
|
1221
|
+
const pkConstraintsByTable = /* @__PURE__ */ new Map();
|
|
1222
|
+
for (const row of pkResult.rows) {
|
|
1223
|
+
let constraints = pkConstraintsByTable.get(row.table_name);
|
|
1224
|
+
if (!constraints) {
|
|
1225
|
+
constraints = /* @__PURE__ */ new Set();
|
|
1226
|
+
pkConstraintsByTable.set(row.table_name, constraints);
|
|
1227
|
+
}
|
|
1228
|
+
constraints.add(row.constraint_name);
|
|
1229
|
+
}
|
|
1230
|
+
const tables = {};
|
|
1231
|
+
for (const tableRow of tablesResult.rows) {
|
|
1232
|
+
const tableName = tableRow.table_name;
|
|
1233
|
+
const columns = {};
|
|
1234
|
+
for (const colRow of columnsByTable.get(tableName) ?? []) {
|
|
1235
|
+
let nativeType = colRow.udt_name;
|
|
1236
|
+
const formattedType = colRow.formatted_type ? normalizeFormattedType(colRow.formatted_type, colRow.data_type, colRow.udt_name) : null;
|
|
1237
|
+
if (formattedType) nativeType = formattedType;
|
|
1238
|
+
else if (colRow.data_type === "character varying" || colRow.data_type === "character") if (colRow.character_maximum_length) nativeType = `${colRow.data_type}(${colRow.character_maximum_length})`;
|
|
1239
|
+
else nativeType = colRow.data_type;
|
|
1240
|
+
else if (colRow.data_type === "numeric" || colRow.data_type === "decimal") if (colRow.numeric_precision && colRow.numeric_scale !== null) nativeType = `${colRow.data_type}(${colRow.numeric_precision},${colRow.numeric_scale})`;
|
|
1241
|
+
else if (colRow.numeric_precision) nativeType = `${colRow.data_type}(${colRow.numeric_precision})`;
|
|
1242
|
+
else nativeType = colRow.data_type;
|
|
1243
|
+
else nativeType = colRow.udt_name || colRow.data_type;
|
|
1244
|
+
columns[colRow.column_name] = {
|
|
1245
|
+
name: colRow.column_name,
|
|
1246
|
+
nativeType,
|
|
1247
|
+
nullable: colRow.is_nullable === "YES",
|
|
1248
|
+
...ifDefined("default", colRow.column_default ?? void 0)
|
|
1249
|
+
};
|
|
1250
|
+
}
|
|
1251
|
+
const pkRows = [...pksByTable.get(tableName) ?? []];
|
|
1252
|
+
const primaryKeyColumns = pkRows.sort((a, b) => a.ordinal_position - b.ordinal_position).map((row) => row.column_name);
|
|
1253
|
+
const primaryKey = primaryKeyColumns.length > 0 ? {
|
|
1254
|
+
columns: primaryKeyColumns,
|
|
1255
|
+
...pkRows[0]?.constraint_name ? { name: pkRows[0].constraint_name } : {}
|
|
1256
|
+
} : void 0;
|
|
1257
|
+
const foreignKeysMap = /* @__PURE__ */ new Map();
|
|
1258
|
+
for (const fkRow of fksByTable.get(tableName) ?? []) {
|
|
1259
|
+
const existing = foreignKeysMap.get(fkRow.constraint_name);
|
|
1260
|
+
if (existing) {
|
|
1261
|
+
existing.columns.push(fkRow.column_name);
|
|
1262
|
+
existing.referencedColumns.push(fkRow.referenced_column_name);
|
|
1263
|
+
} else foreignKeysMap.set(fkRow.constraint_name, {
|
|
1264
|
+
columns: [fkRow.column_name],
|
|
1265
|
+
referencedTable: fkRow.referenced_table_name,
|
|
1266
|
+
referencedColumns: [fkRow.referenced_column_name],
|
|
1267
|
+
name: fkRow.constraint_name
|
|
1268
|
+
});
|
|
1269
|
+
}
|
|
1270
|
+
const foreignKeys = Array.from(foreignKeysMap.values()).map((fk) => ({
|
|
1271
|
+
columns: Object.freeze([...fk.columns]),
|
|
1272
|
+
referencedTable: fk.referencedTable,
|
|
1273
|
+
referencedColumns: Object.freeze([...fk.referencedColumns]),
|
|
1274
|
+
name: fk.name
|
|
1275
|
+
}));
|
|
1276
|
+
const pkConstraints = pkConstraintsByTable.get(tableName) ?? /* @__PURE__ */ new Set();
|
|
1277
|
+
const uniquesMap = /* @__PURE__ */ new Map();
|
|
1278
|
+
for (const uniqueRow of uniquesByTable.get(tableName) ?? []) {
|
|
1279
|
+
if (pkConstraints.has(uniqueRow.constraint_name)) continue;
|
|
1280
|
+
const existing = uniquesMap.get(uniqueRow.constraint_name);
|
|
1281
|
+
if (existing) existing.columns.push(uniqueRow.column_name);
|
|
1282
|
+
else uniquesMap.set(uniqueRow.constraint_name, {
|
|
1283
|
+
columns: [uniqueRow.column_name],
|
|
1284
|
+
name: uniqueRow.constraint_name
|
|
1285
|
+
});
|
|
1286
|
+
}
|
|
1287
|
+
const uniques = Array.from(uniquesMap.values()).map((uq) => ({
|
|
1288
|
+
columns: Object.freeze([...uq.columns]),
|
|
1289
|
+
name: uq.name
|
|
1290
|
+
}));
|
|
1291
|
+
const indexesMap = /* @__PURE__ */ new Map();
|
|
1292
|
+
for (const idxRow of indexesByTable.get(tableName) ?? []) {
|
|
1293
|
+
if (!idxRow.attname) continue;
|
|
1294
|
+
const existing = indexesMap.get(idxRow.indexname);
|
|
1295
|
+
if (existing) existing.columns.push(idxRow.attname);
|
|
1296
|
+
else indexesMap.set(idxRow.indexname, {
|
|
1297
|
+
columns: [idxRow.attname],
|
|
1298
|
+
name: idxRow.indexname,
|
|
1299
|
+
unique: idxRow.indisunique
|
|
1300
|
+
});
|
|
1301
|
+
}
|
|
1302
|
+
const indexes = Array.from(indexesMap.values()).map((idx) => ({
|
|
1303
|
+
columns: Object.freeze([...idx.columns]),
|
|
1304
|
+
name: idx.name,
|
|
1305
|
+
unique: idx.unique
|
|
1306
|
+
}));
|
|
1307
|
+
tables[tableName] = {
|
|
1308
|
+
name: tableName,
|
|
1309
|
+
columns,
|
|
1310
|
+
...ifDefined("primaryKey", primaryKey),
|
|
1311
|
+
foreignKeys,
|
|
1312
|
+
uniques,
|
|
1313
|
+
indexes
|
|
1314
|
+
};
|
|
1315
|
+
}
|
|
1316
|
+
const extensions = extensionsResult.rows.map((row) => row.extname);
|
|
1317
|
+
const storageTypes = await pgEnumControlHooks.introspectTypes?.({
|
|
1318
|
+
driver,
|
|
1319
|
+
schemaName: schema
|
|
1320
|
+
}) ?? {};
|
|
1321
|
+
return {
|
|
1322
|
+
tables,
|
|
1323
|
+
extensions,
|
|
1324
|
+
annotations: { pg: {
|
|
1325
|
+
schema,
|
|
1326
|
+
version: await this.getPostgresVersion(driver),
|
|
1327
|
+
...ifDefined("storageTypes", Object.keys(storageTypes).length > 0 ? storageTypes : void 0)
|
|
1328
|
+
} }
|
|
1329
|
+
};
|
|
1330
|
+
}
|
|
1331
|
+
/**
 * Gets the Postgres version from the database.
 *
 * Issues `SELECT version()` through the supplied driver and extracts the
 * "major.minor" portion of the server banner (e.g. "16.2" from
 * "PostgreSQL 16.2 on x86_64-..."). Returns "unknown" when the row is
 * missing or the banner does not match "PostgreSQL <major>.<minor>".
 *
 * @param driver - query driver exposing `query(sql, params)`; only read here, never closed.
 * @returns version string like "16.2", or "unknown".
 */
async getPostgresVersion(driver) {
  return ((await driver.query("SELECT version() AS version", [])).rows[0]?.version ?? "").match(/PostgreSQL (\d+\.\d+)/)?.[1] ?? "unknown";
}
|
|
1337
|
+
};
|
|
1338
|
+
/**
 * Pre-computed lookup table for simple prefix-based type normalization.
 * Maps short Postgres type names to their canonical SQL names.
 *
 * NOTE: consumers iterate this Map and match with startsWith (it is not an
 * O(1) keyed lookup, because formatted types carry suffixes like "(255)").
 * No key here is a prefix of another key, so first-match iteration in
 * normalizeSchemaNativeType is order-independent and safe.
 */
const TYPE_PREFIX_MAP = new Map([
  ["varchar", "character varying"],
  ["bpchar", "character"],
  ["varbit", "bit varying"]
]);
|
|
1348
|
+
/**
 * Normalizes a Postgres schema native type to its canonical form for comparison.
 *
 * Applies, in order: short-name prefix expansion (via TYPE_PREFIX_MAP),
 * "with time zone" rewrites to the tz-suffixed short names (timestamptz/timetz,
 * keeping any precision suffix), and removal of the redundant
 * "without time zone" qualifier.
 */
function normalizeSchemaNativeType(nativeType) {
  const trimmed = nativeType.trim();
  for (const [shortName, canonical] of TYPE_PREFIX_MAP) {
    if (trimmed.startsWith(shortName)) {
      return canonical + trimmed.slice(shortName.length);
    }
  }
  const withTz = " with time zone";
  if (trimmed.includes(withTz)) {
    // Check "timestamp" before "time": "timestamp..." also startsWith "time".
    if (trimmed.startsWith("timestamp")) {
      return `timestamptz${trimmed.slice("timestamp".length).replace(withTz, "")}`;
    }
    if (trimmed.startsWith("time")) {
      return `timetz${trimmed.slice("time".length).replace(withTz, "")}`;
    }
  }
  const withoutTz = " without time zone";
  if (trimmed.includes(withoutTz)) {
    return trimmed.replace(withoutTz, "");
  }
  return trimmed;
}
|
|
1364
|
+
/**
 * Normalizes a Postgres formatted type (format_type output) to the canonical
 * spelling used for contract comparison.
 *
 * Resolution order: exact aliases, short-name prefix expansion, temporal
 * rewrites (guarded by dataType/udtName), then stripping of surrounding
 * double quotes (e.g. user-defined type names).
 */
function normalizeFormattedType(formattedType, dataType, udtName) {
  const exactAliases = new Map([
    ["integer", "int4"],
    ["smallint", "int2"],
    ["bigint", "int8"],
    ["real", "float4"],
    ["double precision", "float8"],
    ["boolean", "bool"]
  ]);
  const exact = exactAliases.get(formattedType);
  if (exact !== void 0) return exact;
  const prefixAliases = [
    ["varchar", "character varying"],
    ["bpchar", "character"],
    ["varbit", "bit varying"]
  ];
  for (const [shortName, longName] of prefixAliases) {
    if (formattedType.startsWith(shortName)) {
      return formattedType.replace(shortName, longName);
    }
  }
  // Temporal rewrites: collapse verbose tz qualifiers into the short names,
  // preserving any precision suffix such as "(3)".
  if (dataType === "timestamp with time zone" || udtName === "timestamptz") {
    return formattedType.replace("timestamp", "timestamptz").replace(" with time zone", "").trim();
  }
  if (dataType === "timestamp without time zone" || udtName === "timestamp") {
    return formattedType.replace(" without time zone", "").trim();
  }
  if (dataType === "time with time zone" || udtName === "timetz") {
    return formattedType.replace("time", "timetz").replace(" with time zone", "").trim();
  }
  if (dataType === "time without time zone" || udtName === "time") {
    return formattedType.replace(" without time zone", "").trim();
  }
  // Quoted names (e.g. enums rendered as "my_type") lose their quotes.
  if (formattedType.startsWith("\"") && formattedType.endsWith("\"")) {
    return formattedType.slice(1, -1);
  }
  return formattedType;
}
|
|
1381
|
+
/**
 * Groups an array of objects by a specified key.
 * Returns a Map (insertion-ordered) for O(1) lookup by group key.
 */
function groupBy(items, key) {
  const grouped = /* @__PURE__ */ new Map();
  for (const item of items) {
    const bucketKey = item[key];
    const bucket = grouped.get(bucketKey);
    if (bucket === void 0) {
      grouped.set(bucketKey, [item]);
    } else {
      bucket.push(item);
    }
  }
  return grouped;
}
|
|
1398
|
+
/**
 * Control-plane descriptor for the Postgres target adapter.
 *
 * Spreads the shared descriptor metadata and exposes the factory the
 * framework calls to obtain a control adapter instance.
 */
var control_default$1 = {
  ...postgresAdapterDescriptorMeta,
  // This adapter declares no extra control operation signatures.
  operationSignatures: () => [],
  // Factory: a fresh adapter instance per call (no shared state here).
  create() {
    return new PostgresControlAdapter();
  }
};
|
|
1405
|
+
|
|
1406
|
+
//#endregion
|
|
1407
|
+
//#region src/core/migrations/planner.ts
|
|
1408
|
+
/** Baseline planner configuration used when the caller omits options. */
const DEFAULT_PLANNER_CONFIG = { defaultSchema: "public" };
/**
 * Creates a Postgres migration planner, merging the supplied partial
 * configuration over the defaults (caller-supplied values win).
 */
function createPostgresMigrationPlanner(config = {}) {
  const resolved = Object.assign({}, DEFAULT_PLANNER_CONFIG, config);
  return new PostgresMigrationPlanner(resolved);
}
|
|
1415
|
+
/**
 * Init-style migration planner for Postgres.
 *
 * Compares a storage contract against an introspected database schema and
 * emits ONLY additive operations (create table/column/constraint/index).
 * Any non-additive discrepancy is surfaced as a planner conflict instead of
 * an operation. Every emitted operation carries precheck / execute /
 * postcheck SQL so the runner can verify each step.
 */
var PostgresMigrationPlanner = class {
  constructor(config) {
    // config.defaultSchema is the fallback schema when options.schemaName is absent.
    this.config = config;
  }
  /**
   * Produces a migration plan for the given contract/schema pair.
   * Returns a planner failure when the policy forbids additive operations
   * or when non-additive conflicts are detected; otherwise a success result
   * wrapping the ordered operation list (dependencies → storage types →
   * tables → columns → primary keys → uniques → indexes → foreign keys).
   */
  plan(options) {
    const schemaName = options.schemaName ?? this.config.defaultSchema;
    const policyResult = this.ensureAdditivePolicy(options.policy);
    if (policyResult) return policyResult;
    const classification = this.classifySchema(options);
    if (classification.kind === "conflict") return plannerFailure(classification.conflicts);
    const codecHooks = extractCodecControlHooks(options.frameworkComponents);
    const operations = [];
    const storageTypePlan = this.buildStorageTypeOperations(options, schemaName, codecHooks);
    if (storageTypePlan.conflicts.length > 0) return plannerFailure(storageTypePlan.conflicts);
    // Order matters: types/tables must exist before columns and constraints
    // that reference them; foreign keys come last so referenced tables exist.
    operations.push(...this.buildDatabaseDependencyOperations(options), ...storageTypePlan.operations, ...this.buildTableOperations(options.contract.storage.tables, options.schema, schemaName), ...this.buildColumnOperations(options.contract.storage.tables, options.schema, schemaName), ...this.buildPrimaryKeyOperations(options.contract.storage.tables, options.schema, schemaName), ...this.buildUniqueOperations(options.contract.storage.tables, options.schema, schemaName), ...this.buildIndexOperations(options.contract.storage.tables, options.schema, schemaName), ...this.buildForeignKeyOperations(options.contract.storage.tables, options.schema, schemaName));
    return plannerSuccess(createMigrationPlan({
      targetId: "postgres",
      origin: null,
      destination: {
        storageHash: options.contract.storageHash,
        ...ifDefined("profileHash", options.contract.profileHash)
      },
      operations
    }));
  }
  /** Fails fast when the policy does not allow additive operations; returns null when it does. */
  ensureAdditivePolicy(policy) {
    if (!policy.allowedOperationClasses.includes("additive")) return plannerFailure([{
      kind: "unsupportedOperation",
      summary: "Init planner requires additive operations be allowed",
      why: "The init planner only emits additive operations. Update the policy to include \"additive\"."
    }]);
    return null;
  }
  /**
   * Builds migration operations from component-owned database dependencies.
   * These operations install database-side persistence structures declared by components.
   * Dependencies and their install operations are de-duplicated by id; a
   * dependency whose verify check reports no missing pieces is skipped.
   */
  buildDatabaseDependencyOperations(options) {
    const dependencies = this.collectDependencies(options);
    const operations = [];
    const seenDependencyIds = /* @__PURE__ */ new Set();
    const seenOperationIds = /* @__PURE__ */ new Set();
    for (const dependency of dependencies) {
      if (seenDependencyIds.has(dependency.id)) continue;
      seenDependencyIds.add(dependency.id);
      // Empty result means the dependency is already installed.
      if (dependency.verifyDatabaseDependencyInstalled(options.schema).length === 0) continue;
      for (const installOp of dependency.install) {
        if (seenOperationIds.has(installOp.id)) continue;
        seenOperationIds.add(installOp.id);
        operations.push(installOp);
      }
    }
    return operations;
  }
  /**
   * Delegates storage-type planning (e.g. enums) to codec control hooks.
   * Operations whose class the policy forbids become "missingButNonAdditive"
   * conflicts; accepted operations are re-targeted with type details.
   */
  buildStorageTypeOperations(options, schemaName, codecHooks) {
    const operations = [];
    const conflicts = [];
    const storageTypes = options.contract.storage.types ?? {};
    for (const [typeName, typeInstance] of sortedEntries(storageTypes)) {
      const planResult = codecHooks.get(typeInstance.codecId)?.planTypeOperations?.({
        typeName,
        typeInstance,
        contract: options.contract,
        schema: options.schema,
        schemaName,
        policy: options.policy
      });
      if (!planResult) continue;
      for (const operation of planResult.operations) {
        if (!options.policy.allowedOperationClasses.includes(operation.operationClass)) {
          conflicts.push({
            kind: "missingButNonAdditive",
            summary: `Storage type "${typeName}" requires "${operation.operationClass}" operation "${operation.id}"`,
            location: { type: typeName }
          });
          continue;
        }
        operations.push({
          ...operation,
          target: {
            id: operation.target.id,
            details: this.buildTargetDetails("type", typeName, schemaName)
          }
        });
      }
    }
    return {
      operations,
      conflicts
    };
  }
  /** Collects init-time database dependencies from SQL-family framework components, sorted by id. */
  collectDependencies(options) {
    const components = options.frameworkComponents;
    if (components.length === 0) return [];
    const deps = [];
    for (const component of components) {
      if (!isSqlDependencyProvider(component)) continue;
      const initDeps = component.databaseDependencies?.init;
      if (initDeps && initDeps.length > 0) deps.push(...initDeps);
    }
    return sortDependencies(deps);
  }
  /** Emits CREATE TABLE operations for contract tables absent from the introspected schema. */
  buildTableOperations(tables, schema, schemaName) {
    const operations = [];
    for (const [tableName, table] of sortedEntries(tables)) {
      if (schema.tables[tableName]) continue;
      const qualified = qualifyTableName(schemaName, tableName);
      operations.push({
        id: `table.${tableName}`,
        label: `Create table ${tableName}`,
        summary: `Creates table ${tableName} with required columns`,
        operationClass: "additive",
        target: {
          id: "postgres",
          details: this.buildTargetDetails("table", tableName, schemaName)
        },
        precheck: [{
          description: `ensure table "${tableName}" does not exist`,
          sql: `SELECT to_regclass(${toRegclassLiteral(schemaName, tableName)}) IS NULL`
        }],
        execute: [{
          description: `create table "${tableName}"`,
          sql: buildCreateTableSql(qualified, table)
        }],
        postcheck: [{
          description: `verify table "${tableName}" exists`,
          sql: `SELECT to_regclass(${toRegclassLiteral(schemaName, tableName)}) IS NOT NULL`
        }]
      });
    }
    return operations;
  }
  /**
   * Emits ADD COLUMN operations for missing columns on tables that already
   * exist. Tables absent from the schema are skipped here — their columns
   * are covered by the CREATE TABLE statement.
   */
  buildColumnOperations(tables, schema, schemaName) {
    const operations = [];
    for (const [tableName, table] of sortedEntries(tables)) {
      const schemaTable = schema.tables[tableName];
      if (!schemaTable) continue;
      for (const [columnName, column] of sortedEntries(table.columns)) {
        if (schemaTable.columns[columnName]) continue;
        operations.push(this.buildAddColumnOperation(schemaName, tableName, columnName, column));
      }
    }
    return operations;
  }
  /**
   * Builds a single ADD COLUMN operation. A NOT NULL column without a
   * default can only be added safely to an empty table, so an extra
   * emptiness precheck is attached in that case; a NOT NULL postcheck is
   * attached whenever the column is non-nullable.
   */
  buildAddColumnOperation(schema, tableName, columnName, column) {
    const qualified = qualifyTableName(schema, tableName);
    const notNull = column.nullable === false;
    const hasDefault = column.default !== void 0;
    const requiresEmptyTable = notNull && !hasDefault;
    const precheck = [{
      description: `ensure column "${columnName}" is missing`,
      sql: columnExistsCheck({
        schema,
        table: tableName,
        column: columnName,
        exists: false
      })
    }, ...requiresEmptyTable ? [{
      description: `ensure table "${tableName}" is empty before adding NOT NULL column without default`,
      sql: tableIsEmptyCheck(qualified)
    }] : []];
    const execute = [{
      description: `add column "${columnName}"`,
      sql: buildAddColumnSql(qualified, columnName, column)
    }];
    const postcheck = [{
      description: `verify column "${columnName}" exists`,
      sql: columnExistsCheck({
        schema,
        table: tableName,
        column: columnName
      })
    }, ...notNull ? [{
      description: `verify column "${columnName}" is NOT NULL`,
      sql: columnIsNotNullCheck({
        schema,
        table: tableName,
        column: columnName
      })
    }] : []];
    return {
      id: `column.${tableName}.${columnName}`,
      label: `Add column ${columnName} to ${tableName}`,
      summary: `Adds column ${columnName} to table ${tableName}`,
      operationClass: "additive",
      target: {
        id: "postgres",
        details: this.buildTargetDetails("table", tableName, schema)
      },
      precheck,
      execute,
      postcheck
    };
  }
  /**
   * Emits ADD CONSTRAINT ... PRIMARY KEY operations for existing tables that
   * declare a contract PK but have none in the database. Newly created
   * tables get their PK inline via buildCreateTableSql.
   */
  buildPrimaryKeyOperations(tables, schema, schemaName) {
    const operations = [];
    for (const [tableName, table] of sortedEntries(tables)) {
      if (!table.primaryKey) continue;
      const schemaTable = schema.tables[tableName];
      if (!schemaTable || schemaTable.primaryKey) continue;
      const constraintName = table.primaryKey.name ?? `${tableName}_pkey`;
      operations.push({
        id: `primaryKey.${tableName}.${constraintName}`,
        label: `Add primary key ${constraintName} on ${tableName}`,
        summary: `Adds primary key ${constraintName} on ${tableName}`,
        operationClass: "additive",
        target: {
          id: "postgres",
          details: this.buildTargetDetails("table", tableName, schemaName)
        },
        precheck: [{
          description: `ensure primary key does not exist on "${tableName}"`,
          sql: tableHasPrimaryKeyCheck(schemaName, tableName, false)
        }],
        execute: [{
          description: `add primary key "${constraintName}"`,
          sql: `ALTER TABLE ${qualifyTableName(schemaName, tableName)}
ADD CONSTRAINT ${quoteIdentifier(constraintName)}
PRIMARY KEY (${table.primaryKey.columns.map(quoteIdentifier).join(", ")})`
        }],
        postcheck: [{
          description: `verify primary key "${constraintName}" exists`,
          sql: tableHasPrimaryKeyCheck(schemaName, tableName, true, constraintName)
        }]
      });
    }
    return operations;
  }
  /**
   * Emits ADD CONSTRAINT ... UNIQUE operations for contract uniques not yet
   * satisfied by the introspected table (for new tables, schemaTable is
   * undefined, so every declared unique is emitted).
   */
  buildUniqueOperations(tables, schema, schemaName) {
    const operations = [];
    for (const [tableName, table] of sortedEntries(tables)) {
      const schemaTable = schema.tables[tableName];
      for (const unique of table.uniques) {
        if (schemaTable && hasUniqueConstraint(schemaTable, unique.columns)) continue;
        const constraintName = unique.name ?? `${tableName}_${unique.columns.join("_")}_key`;
        operations.push({
          id: `unique.${tableName}.${constraintName}`,
          label: `Add unique constraint ${constraintName} on ${tableName}`,
          summary: `Adds unique constraint ${constraintName} on ${tableName}`,
          operationClass: "additive",
          target: {
            id: "postgres",
            details: this.buildTargetDetails("unique", constraintName, schemaName, tableName)
          },
          precheck: [{
            description: `ensure unique constraint "${constraintName}" is missing`,
            sql: constraintExistsCheck({
              constraintName,
              schema: schemaName,
              exists: false
            })
          }],
          execute: [{
            description: `add unique constraint "${constraintName}"`,
            sql: `ALTER TABLE ${qualifyTableName(schemaName, tableName)}
ADD CONSTRAINT ${quoteIdentifier(constraintName)}
UNIQUE (${unique.columns.map(quoteIdentifier).join(", ")})`
          }],
          postcheck: [{
            description: `verify unique constraint "${constraintName}" exists`,
            sql: constraintExistsCheck({
              constraintName,
              schema: schemaName
            })
          }]
        });
      }
    }
    return operations;
  }
  /**
   * Emits CREATE INDEX operations for contract indexes not yet satisfied.
   * NOTE(review): the execute SQL always emits a plain CREATE INDEX; if a
   * contract index can carry `unique: true` (as introspected indexes do),
   * that flag is silently dropped here — confirm against the contract type.
   */
  buildIndexOperations(tables, schema, schemaName) {
    const operations = [];
    for (const [tableName, table] of sortedEntries(tables)) {
      const schemaTable = schema.tables[tableName];
      for (const index of table.indexes) {
        if (schemaTable && hasIndex(schemaTable, index.columns)) continue;
        const indexName = index.name ?? `${tableName}_${index.columns.join("_")}_idx`;
        operations.push({
          id: `index.${tableName}.${indexName}`,
          label: `Create index ${indexName} on ${tableName}`,
          summary: `Creates index ${indexName} on ${tableName}`,
          operationClass: "additive",
          target: {
            id: "postgres",
            details: this.buildTargetDetails("index", indexName, schemaName, tableName)
          },
          precheck: [{
            description: `ensure index "${indexName}" is missing`,
            sql: `SELECT to_regclass(${toRegclassLiteral(schemaName, indexName)}) IS NULL`
          }],
          execute: [{
            description: `create index "${indexName}"`,
            sql: `CREATE INDEX ${quoteIdentifier(indexName)} ON ${qualifyTableName(schemaName, tableName)} (${index.columns.map(quoteIdentifier).join(", ")})`
          }],
          postcheck: [{
            description: `verify index "${indexName}" exists`,
            sql: `SELECT to_regclass(${toRegclassLiteral(schemaName, indexName)}) IS NOT NULL`
          }]
        });
      }
    }
    return operations;
  }
  /**
   * Emits ADD CONSTRAINT ... FOREIGN KEY operations for contract FKs absent
   * from the introspected table. Referenced tables are qualified with the
   * same schema as the referencing table.
   */
  buildForeignKeyOperations(tables, schema, schemaName) {
    const operations = [];
    for (const [tableName, table] of sortedEntries(tables)) {
      const schemaTable = schema.tables[tableName];
      for (const foreignKey of table.foreignKeys) {
        if (schemaTable && hasForeignKey(schemaTable, foreignKey)) continue;
        const fkName = foreignKey.name ?? `${tableName}_${foreignKey.columns.join("_")}_fkey`;
        operations.push({
          id: `foreignKey.${tableName}.${fkName}`,
          label: `Add foreign key ${fkName} on ${tableName}`,
          summary: `Adds foreign key ${fkName} referencing ${foreignKey.references.table}`,
          operationClass: "additive",
          target: {
            id: "postgres",
            details: this.buildTargetDetails("foreignKey", fkName, schemaName, tableName)
          },
          precheck: [{
            description: `ensure foreign key "${fkName}" is missing`,
            sql: constraintExistsCheck({
              constraintName: fkName,
              schema: schemaName,
              exists: false
            })
          }],
          execute: [{
            description: `add foreign key "${fkName}"`,
            sql: `ALTER TABLE ${qualifyTableName(schemaName, tableName)}
ADD CONSTRAINT ${quoteIdentifier(fkName)}
FOREIGN KEY (${foreignKey.columns.map(quoteIdentifier).join(", ")})
REFERENCES ${qualifyTableName(schemaName, foreignKey.references.table)} (${foreignKey.references.columns.map(quoteIdentifier).join(", ")})`
          }],
          postcheck: [{
            description: `verify foreign key "${fkName}" exists`,
            sql: constraintExistsCheck({
              constraintName: fkName,
              schema: schemaName
            })
          }]
        });
      }
    }
    return operations;
  }
  /** Builds the operation target details; `table` is included only when defined. */
  buildTargetDetails(objectType, name, schema, table) {
    return {
      schema,
      objectType,
      name,
      ...ifDefined("table", table)
    };
  }
  /**
   * Runs non-strict schema verification and converts any non-additive
   * issues into planner conflicts. Additive issues (missing things this
   * planner can create) are tolerated.
   */
  classifySchema(options) {
    const verifyResult = verifySqlSchema({
      contract: options.contract,
      schema: options.schema,
      strict: false,
      typeMetadataRegistry: /* @__PURE__ */ new Map(),
      frameworkComponents: options.frameworkComponents,
      normalizeDefault: parsePostgresDefault,
      normalizeNativeType: normalizeSchemaNativeType
    });
    const conflicts = this.extractConflicts(verifyResult.schema.issues);
    if (conflicts.length > 0) return {
      kind: "conflict",
      conflicts
    };
    return { kind: "ok" };
  }
  /** Filters out additive issues, converts the rest, and sorts deterministically. */
  extractConflicts(issues) {
    const conflicts = [];
    for (const issue of issues) {
      if (isAdditiveIssue(issue)) continue;
      const conflict = this.convertIssueToConflict(issue);
      if (conflict) conflicts.push(conflict);
    }
    return conflicts.sort(conflictComparator);
  }
  /** Maps verifier issue kinds onto planner conflict kinds; unknown kinds are dropped. */
  convertIssueToConflict(issue) {
    switch (issue.kind) {
      case "type_mismatch": return this.buildConflict("typeMismatch", issue);
      case "nullability_mismatch": return this.buildConflict("nullabilityConflict", issue);
      case "primary_key_mismatch": return this.buildConflict("indexIncompatible", issue);
      case "unique_constraint_mismatch": return this.buildConflict("indexIncompatible", issue);
      case "index_mismatch": return this.buildConflict("indexIncompatible", issue);
      case "foreign_key_mismatch": return this.buildConflict("foreignKeyConflict", issue);
      default: return null;
    }
  }
  /** Assembles a conflict record; location/meta are attached only when present on the issue. */
  buildConflict(kind, issue) {
    const location = buildConflictLocation(issue);
    const meta = issue.expected || issue.actual ? Object.freeze({
      ...ifDefined("expected", issue.expected),
      ...ifDefined("actual", issue.actual)
    }) : void 0;
    return {
      kind,
      summary: issue.message,
      ...ifDefined("location", location),
      ...ifDefined("meta", meta)
    };
  }
};
|
|
1820
|
+
/**
 * Type guard: does this framework component declare SQL database
 * dependencies? A component qualifies when it is an object, its familyId
 * (if present) is "sql", it has an own `databaseDependencies` property, and
 * that property is either undefined or a non-null object.
 */
function isSqlDependencyProvider(component) {
  if (typeof component !== "object") return false;
  if (component === null) return false;
  const candidate = component;
  // A declared non-"sql" family disqualifies; an absent familyId does not.
  if (Object.hasOwn(candidate, "familyId") && candidate["familyId"] !== "sql") return false;
  if (!Object.hasOwn(candidate, "databaseDependencies")) return false;
  const declared = candidate["databaseDependencies"];
  if (declared === void 0) return true;
  return typeof declared === "object" && declared !== null;
}
|
|
1828
|
+
/**
 * Returns the dependencies ordered by id (locale comparison) for
 * deterministic planning. Arrays of zero or one element are returned
 * unchanged (same reference); otherwise a sorted copy is returned so the
 * caller's array is never mutated.
 */
function sortDependencies(dependencies) {
  if (dependencies.length < 2) return dependencies;
  const ordered = Array.from(dependencies);
  ordered.sort((left, right) => left.id.localeCompare(right.id));
  return ordered;
}
|
|
1832
|
+
/**
 * Renders the CREATE TABLE statement for a contract table.
 * Columns appear in the contract's declaration order; a table-level
 * PRIMARY KEY constraint is appended when the table declares one.
 */
function buildCreateTableSql(qualifiedTableName, table) {
  const columnDefinitions = [];
  for (const [columnName, column] of Object.entries(table.columns)) {
    const parts = [
      quoteIdentifier(columnName),
      buildColumnTypeSql(column),
      buildColumnDefaultSql(column.default)
    ];
    if (!column.nullable) parts.push("NOT NULL");
    // buildColumnDefaultSql may return "" — drop empty fragments.
    columnDefinitions.push(parts.filter(Boolean).join(" "));
  }
  const constraintDefinitions = [];
  if (table.primaryKey) constraintDefinitions.push(`PRIMARY KEY (${table.primaryKey.columns.map(quoteIdentifier).join(", ")})`);
  return `CREATE TABLE ${qualifiedTableName} (\n ${[...columnDefinitions, ...constraintDefinitions].join(",\n ")}\n)`;
}
|
|
1845
|
+
/**
 * Builds the column type SQL, handling autoincrement as a special case.
 * For autoincrement() defaults on integer types we emit the corresponding
 * SERIAL/BIGSERIAL/SMALLSERIAL pseudo-type; type-referencing columns emit
 * the quoted type name; otherwise the (possibly parameterized) native type.
 */
function buildColumnTypeSql(column) {
  const columnDefault = column.default;
  if (columnDefault?.kind === "function" && columnDefault.expression === "autoincrement()") {
    switch (column.nativeType) {
      case "int4":
      case "integer":
        return "SERIAL";
      case "int8":
      case "bigint":
        return "BIGSERIAL";
      case "int2":
      case "smallint":
        return "SMALLSERIAL";
    }
  }
  // Columns referencing a declared storage type use the quoted type name.
  if (column.typeRef) return quoteIdentifier(column.nativeType);
  return renderParameterizedTypeSql(column) ?? column.nativeType;
}
|
|
1859
|
+
/**
 * Renders parameterized type SQL for a column, returning null if no expansion is needed.
 *
 * Uses the shared expandParameterizedNativeType utility from the postgres adapter.
 * Returns null when the column has no typeParams, or when expansion leaves
 * the base nativeType unchanged, so the caller can fall back to nativeType.
 */
function renderParameterizedTypeSql(column) {
  const { nativeType, codecId, typeParams } = column;
  if (!typeParams) return null;
  const expanded = expandParameterizedNativeType({
    nativeType,
    codecId,
    typeParams
  });
  if (expanded === nativeType) return null;
  return expanded;
}
|
|
1875
|
+
/**
 * Builds the DEFAULT clause for a column definition.
 * Returns empty string if no default is defined.
 *
 * Note: autoincrement is handled specially via SERIAL types, so we skip it here.
 *
 * Bug fix (sequence branch): the sequence name must be a single-quoted text
 * literal cast to regclass — `nextval('name'::regclass)`. The previous
 * double-quoted form `nextval("name"::regclass)` is parsed as a column
 * reference, which Postgres rejects inside a DEFAULT expression. Text cast
 * to regclass resolves the name case-sensitively, so no identifier quoting
 * is needed.
 */
function buildColumnDefaultSql(columnDefault) {
  if (!columnDefault) return "";
  switch (columnDefault.kind) {
    case "literal": return `DEFAULT ${columnDefault.expression}`;
    case "function":
      // autoincrement() is realized as a SERIAL type by buildColumnTypeSql.
      if (columnDefault.expression === "autoincrement()") return "";
      return `DEFAULT ${columnDefault.expression}`;
    case "sequence": return `DEFAULT nextval('${escapeLiteral(columnDefault.name)}'::regclass)`;
  }
}
|
|
1891
|
+
/** Returns the schema-qualified, identifier-quoted table name ("schema"."table"). */
function qualifyTableName(schema, table) {
  return [schema, table].map(quoteIdentifier).join(".");
}
|
|
1894
|
+
/**
 * Renders a single-quoted string literal containing the quoted, qualified
 * name, suitable as the argument of to_regclass().
 */
function toRegclassLiteral(schema, name) {
  const qualifiedName = `${quoteIdentifier(schema)}.${quoteIdentifier(name)}`;
  return `'${escapeLiteral(qualifiedName)}'`;
}
|
|
1897
|
+
/** Object.entries with keys ordered by locale comparison, for deterministic iteration. */
function sortedEntries(record) {
  const entries = Object.entries(record);
  entries.sort((left, right) => left[0].localeCompare(right[0]));
  return entries;
}
|
|
1900
|
+
/**
 * SQL probe for the presence of a named constraint in the given schema
 * (pg_constraint joined to its namespace). With exists=false the probe
 * asserts absence instead. Inputs are escaped as string literals.
 */
function constraintExistsCheck({ constraintName, schema, exists = true }) {
  return `SELECT ${exists ? "EXISTS" : "NOT EXISTS"} (
SELECT 1 FROM pg_constraint c
JOIN pg_namespace n ON c.connamespace = n.oid
WHERE c.conname = '${escapeLiteral(constraintName)}'
AND n.nspname = '${escapeLiteral(schema)}'
)`;
}
|
|
1908
|
+
/**
 * SQL probe for column presence via information_schema.columns.
 * With exists=false the probe asserts absence. Inputs are escaped as
 * string literals.
 */
function columnExistsCheck({ schema, table, column, exists = true }) {
  return `SELECT ${exists ? "" : "NOT "}EXISTS (
SELECT 1
FROM information_schema.columns
WHERE table_schema = '${escapeLiteral(schema)}'
AND table_name = '${escapeLiteral(table)}'
AND column_name = '${escapeLiteral(column)}'
)`;
}
|
|
1917
|
+
/**
 * SQL probe asserting that the column exists AND is declared NOT NULL
 * (information_schema reports is_nullable = 'NO'). Inputs are escaped as
 * string literals.
 */
function columnIsNotNullCheck({ schema, table, column }) {
  return `SELECT EXISTS (
SELECT 1
FROM information_schema.columns
WHERE table_schema = '${escapeLiteral(schema)}'
AND table_name = '${escapeLiteral(table)}'
AND column_name = '${escapeLiteral(column)}'
AND is_nullable = 'NO'
)`;
}
|
|
1927
|
+
/**
 * SQL probe returning true when the table has no rows. The argument must
 * already be a quoted, schema-qualified name (it is interpolated verbatim).
 */
function tableIsEmptyCheck(qualifiedTableName) {
  return "SELECT NOT EXISTS (SELECT 1 FROM " + qualifiedTableName + " LIMIT 1)";
}
|
|
1930
|
+
/**
 * Renders an ALTER TABLE ... ADD COLUMN statement, appending the DEFAULT
 * clause (when the column declares one that renders non-empty) and NOT NULL
 * (when the column is non-nullable).
 */
function buildAddColumnSql(qualifiedTableName, columnName, column) {
  const parts = [
    `ALTER TABLE ${qualifiedTableName}`,
    `ADD COLUMN ${quoteIdentifier(columnName)} ${buildColumnTypeSql(column)}`
  ];
  const defaultClause = buildColumnDefaultSql(column.default);
  if (defaultClause) parts.push(defaultClause);
  if (!column.nullable) parts.push("NOT NULL");
  return parts.join(" ");
}
|
|
1940
|
+
/**
 * SQL probe for a table's primary key via pg_index (indisprimary).
 * With exists=false the probe asserts absence. When constraintName is
 * given, the PK's index relation must also carry that name — Postgres
 * names the PK index after the constraint, so this pins the exact PK.
 * Inputs are escaped as string literals.
 */
function tableHasPrimaryKeyCheck(schema, table, exists, constraintName) {
  const comparison = exists ? "" : "NOT ";
  const constraintFilter = constraintName ? `AND c2.relname = '${escapeLiteral(constraintName)}'` : "";
  return `SELECT ${comparison}EXISTS (
SELECT 1
FROM pg_index i
JOIN pg_class c ON c.oid = i.indrelid
JOIN pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN pg_class c2 ON c2.oid = i.indexrelid
WHERE n.nspname = '${escapeLiteral(schema)}'
AND c.relname = '${escapeLiteral(table)}'
AND i.indisprimary
${constraintFilter}
)`;
}
|
|
1955
|
+
/**
 * Checks if table has a unique constraint satisfied by the given columns.
 * Uses shared semantic satisfaction predicate from verify-helpers; both the
 * table's declared uniques and its indexes are passed to the predicate
 * (presumably so a unique index can also satisfy the requirement — confirm
 * against isUniqueConstraintSatisfied).
 */
function hasUniqueConstraint(table, columns) {
  return isUniqueConstraintSatisfied(table.uniques, table.indexes, columns);
}
|
|
1962
|
+
/**
 * Checks if table has an index satisfied by the given columns.
 * Uses shared semantic satisfaction predicate from verify-helpers; both the
 * table's indexes and its unique constraints are passed (argument order is
 * the mirror of hasUniqueConstraint).
 */
function hasIndex(table, columns) {
  return isIndexSatisfied(table.indexes, table.uniques, columns);
}
|
|
1969
|
+
/**
 * Checks whether the table already declares a foreign key equivalent to
 * `fk`: same local columns, same referenced table, same referenced columns.
 */
function hasForeignKey(table, fk) {
  const matchesSpec = (candidate) =>
    arraysEqual(candidate.columns, fk.columns) &&
    candidate.referencedTable === fk.references.table &&
    arraysEqual(candidate.referencedColumns, fk.references.columns);
  return table.foreignKeys.some(matchesSpec);
}
|
|
1972
|
+
/**
 * Classifies a verification issue as "additive" — i.e. fixable by creating
 * something that is missing, without touching existing database objects.
 * Missing types/tables/columns/extensions are always additive; mismatch
 * issues are additive only when nothing currently exists on the database
 * side (no `actual` primary key, no existing index/constraint).
 */
function isAdditiveIssue(issue) {
  const alwaysAdditive = new Set([
    "type_missing",
    "type_values_mismatch",
    "missing_table",
    "missing_column",
    "extension_missing"
  ]);
  if (alwaysAdditive.has(issue.kind)) return true;
  if (issue.kind === "primary_key_mismatch") return issue.actual === void 0;
  const additiveWhenAbsent =
    issue.kind === "unique_constraint_mismatch" ||
    issue.kind === "index_mismatch" ||
    issue.kind === "foreign_key_mismatch";
  if (additiveWhenAbsent) return issue.indexOrConstraint === void 0;
  return false;
}
|
|
1986
|
+
/**
 * Extracts the location fields (table / column / constraint) from an issue
 * into a conflict-location object. Returns undefined when the issue carries
 * no location information at all.
 */
function buildConflictLocation(issue) {
  const entries = [];
  if (issue.table) entries.push(["table", issue.table]);
  if (issue.column) entries.push(["column", issue.column]);
  if (issue.indexOrConstraint) entries.push(["constraint", issue.indexOrConstraint]);
  if (entries.length === 0) return void 0;
  return Object.fromEntries(entries);
}
|
|
1993
|
+
/**
 * Deterministic sort order for conflicts: by kind, then by location
 * (table, column, constraint — undefined sorts first), then by summary.
 */
function conflictComparator(a, b) {
  if (a.kind !== b.kind) return a.kind < b.kind ? -1 : 1;
  const locA = a.location ?? {};
  const locB = b.location ?? {};
  for (const field of ["table", "column", "constraint"]) {
    const cmp = compareStrings(locA[field], locB[field]);
    if (cmp !== 0) return cmp;
  }
  return compareStrings(a.summary, b.summary);
}
|
|
2005
|
+
/**
 * Three-way comparison of possibly-undefined strings; undefined sorts
 * before any defined value, otherwise falls back to `<` ordering.
 */
function compareStrings(a, b) {
  if (a === b) return 0;
  if (a === void 0 || b === void 0) {
    return a === void 0 ? -1 : 1;
  }
  return a < b ? -1 : 1;
}
|
|
2011
|
+
|
|
2012
|
+
//#endregion
|
|
2013
|
+
//#region src/core/migrations/statement-builders.ts
|
|
2014
|
+
// Bootstrap DDL for the prisma_contract control schema. All three statements
// use "if not exists" so they are safe to run on every migration attempt.
const ensurePrismaContractSchemaStatement = {
  sql: "create schema if not exists prisma_contract",
  params: []
};
// Marker table: holds a single row (id defaults to 1) recording which
// contract (core/profile hashes + full contract JSON) is currently applied.
const ensureMarkerTableStatement = {
  sql: `create table if not exists prisma_contract.marker (
  id smallint primary key default 1,
  core_hash text not null,
  profile_hash text not null,
  contract_json jsonb,
  canonical_version int,
  updated_at timestamptz not null default now(),
  app_tag text,
  meta jsonb not null default '{}'
)`,
  params: []
};
// Ledger table: append-only history of contract transitions — origin and
// destination hashes, contract JSON before/after, and the executed operations.
const ensureLedgerTableStatement = {
  sql: `create table if not exists prisma_contract.ledger (
  id bigserial primary key,
  created_at timestamptz not null default now(),
  origin_core_hash text,
  origin_profile_hash text,
  destination_core_hash text not null,
  destination_profile_hash text,
  contract_json_before jsonb,
  contract_json_after jsonb,
  operations jsonb not null
)`,
  params: []
};
|
|
2045
|
+
/**
 * Builds both write statements for the single-row prisma_contract.marker
 * table; the caller picks `insert` when no marker row exists and `update`
 * otherwise. Both statements share ONE params array, so the positional
 * parameters mean the same thing in either statement:
 *   $1 id (always 1), $2 core_hash (from input.storageHash), $3 profile_hash,
 *   $4 contract_json, $5 canonical_version, $6 app_tag, $7 meta.
 * updated_at is always assigned server-side via now().
 */
function buildWriteMarkerStatements(input) {
  const params = [
    1,
    input.storageHash,
    input.profileHash,
    jsonParam(input.contractJson),
    input.canonicalVersion ?? null,
    input.appTag ?? null,
    jsonParam(input.meta ?? {})
  ];
  return {
    insert: {
      sql: `insert into prisma_contract.marker (
  id,
  core_hash,
  profile_hash,
  contract_json,
  canonical_version,
  updated_at,
  app_tag,
  meta
) values (
  $1,
  $2,
  $3,
  $4::jsonb,
  $5,
  now(),
  $6,
  $7::jsonb
)`,
      params
    },
    update: {
      sql: `update prisma_contract.marker set
  core_hash = $2,
  profile_hash = $3,
  contract_json = $4::jsonb,
  canonical_version = $5,
  updated_at = now(),
  app_tag = $6,
  meta = $7::jsonb
where id = $1`,
      params
    }
  };
}
|
|
2092
|
+
/**
 * Builds the insert statement for one prisma_contract.ledger row recording a
 * contract transition. Origin fields are null on the very first migration
 * (no pre-existing marker). JSON payloads are serialized via jsonParam and
 * cast to jsonb in SQL.
 */
function buildLedgerInsertStatement(input) {
  return {
    sql: `insert into prisma_contract.ledger (
  origin_core_hash,
  origin_profile_hash,
  destination_core_hash,
  destination_profile_hash,
  contract_json_before,
  contract_json_after,
  operations
) values (
  $1,
  $2,
  $3,
  $4,
  $5::jsonb,
  $6::jsonb,
  $7::jsonb
)`,
    params: [
      input.originStorageHash ?? null,
      input.originProfileHash ?? null,
      input.destinationStorageHash,
      input.destinationProfileHash ?? null,
      jsonParam(input.contractJsonBefore),
      jsonParam(input.contractJsonAfter),
      jsonParam(input.operations)
    ]
  };
}
|
|
2122
|
+
/**
 * Serializes a value for a jsonb query parameter. undefined and null both
 * become the JSON text "null", so the driver never receives undefined.
 */
function jsonParam(value) {
  if (value === void 0 || value === null) {
    return JSON.stringify(null);
  }
  return JSON.stringify(value);
}
|
|
2125
|
+
|
|
2126
|
+
//#endregion
|
|
2127
|
+
//#region src/core/migrations/runner.ts
|
|
2128
|
+
// Fallback runner configuration, merged under any caller-supplied config.
const DEFAULT_CONFIG = { defaultSchema: "public" };
// Advisory-lock namespace; combined with the schema name to derive the key
// passed to pg_advisory_xact_lock (see PostgresMigrationRunner.acquireLock).
const LOCK_DOMAIN = "prisma_next.contract.marker";
|
|
2130
|
+
/**
 * Deep clones and freezes a record object to prevent mutation.
 * Recursively clones nested objects and arrays to ensure complete isolation.
 *
 * @param {Record<string, unknown>} value - plain record to clone
 * @returns {Readonly<Record<string, unknown>>} frozen deep copy
 */
function cloneAndFreezeRecord(value) {
  const cloned = {};
  for (const [key, val] of Object.entries(value)) cloned[key] = cloneAndFreezeValue(val);
  return Object.freeze(cloned);
}
/**
 * Clones and freezes a single value: arrays and plain objects are copied
 * recursively; null, undefined and primitives pass through unchanged.
 * Fix: array ELEMENTS are now cloned/frozen too — the previous version froze
 * a shallow copy of the array, so object elements remained shared with (and
 * mutable through) the source, contradicting the "complete isolation" doc.
 */
function cloneAndFreezeValue(val) {
  if (val === null || val === void 0) return val;
  if (Array.isArray(val)) return Object.freeze(val.map(cloneAndFreezeValue));
  if (typeof val === "object") return cloneAndFreezeRecord(val);
  return val;
}
|
|
2142
|
+
/**
 * Factory for PostgresMigrationRunner instances. Caller-supplied config
 * entries override DEFAULT_CONFIG on a per-key basis.
 */
function createPostgresMigrationRunner(family, config = {}) {
  const resolvedConfig = { ...DEFAULT_CONFIG, ...config };
  return new PostgresMigrationRunner(family, resolvedConfig);
}
|
|
2148
|
+
var PostgresMigrationRunner = class {
  /**
   * @param family - target-family adapter; provides introspect() and
   *   typeMetadataRegistry used during post-apply verification.
   * @param config - resolved runner configuration (defaultSchema, ...).
   */
  constructor(family, config) {
    this.family = family;
    this.config = config;
  }
  /**
   * Runs a migration plan end-to-end inside one transaction:
   * validate plan vs. contract and policy, take a schema-scoped advisory
   * lock, reconcile the marker, apply operations, re-introspect and verify
   * the resulting schema, then persist marker + ledger and commit.
   * Any failure path (early return or throw) rolls back via `finally`.
   */
  async execute(options) {
    const schema = options.schemaName ?? this.config.defaultSchema;
    const driver = options.driver;
    const lockKey = `${LOCK_DOMAIN}:${schema}`;
    // Fail fast before opening a transaction.
    const destinationCheck = this.ensurePlanMatchesDestinationContract(options.plan.destination, options.destinationContract);
    if (!destinationCheck.ok) return destinationCheck;
    const policyCheck = this.enforcePolicyCompatibility(options.policy, options.plan.operations);
    if (!policyCheck.ok) return policyCheck;
    await this.beginTransaction(driver);
    let committed = false;
    try {
      // Transaction-scoped advisory lock serializes concurrent runners
      // targeting the same schema; released on commit/rollback.
      await this.acquireLock(driver, lockKey);
      await this.ensureControlTables(driver);
      const existingMarker = await readMarker(driver);
      const markerCheck = this.ensureMarkerCompatibility(existingMarker, options.plan);
      if (!markerCheck.ok) return markerCheck;
      // If the marker already records the destination, skip applying the
      // plan entirely (idempotent re-run) but still verify below.
      const markerAtDestination = this.markerMatchesDestination(existingMarker, options.plan);
      let applyValue;
      if (markerAtDestination) applyValue = {
        operationsExecuted: 0,
        executedOperations: []
      };
      else {
        const applyResult = await this.applyPlan(driver, options);
        if (!applyResult.ok) return applyResult;
        applyValue = applyResult.value;
      }
      // Re-introspect the live schema and verify it satisfies the
      // destination contract before recording success.
      const schemaIR = await this.family.introspect({
        driver,
        contractIR: options.destinationContract
      });
      const schemaVerifyResult = verifySqlSchema({
        contract: options.destinationContract,
        schema: schemaIR,
        strict: options.strictVerification ?? true,
        context: options.context ?? {},
        typeMetadataRegistry: this.family.typeMetadataRegistry,
        frameworkComponents: options.frameworkComponents,
        normalizeDefault: parsePostgresDefault,
        normalizeNativeType: normalizeSchemaNativeType
      });
      if (!schemaVerifyResult.ok) return runnerFailure("SCHEMA_VERIFY_FAILED", schemaVerifyResult.summary, {
        why: "The resulting database schema does not satisfy the destination contract.",
        meta: { issues: schemaVerifyResult.schema.issues }
      });
      await this.upsertMarker(driver, options, existingMarker);
      await this.recordLedgerEntry(driver, options, existingMarker, applyValue.executedOperations);
      await this.commitTransaction(driver);
      committed = true;
      return runnerSuccess({
        operationsPlanned: options.plan.operations.length,
        operationsExecuted: applyValue.operationsExecuted
      });
    } finally {
      // Early returns above land here with committed === false.
      if (!committed) await this.rollbackTransaction(driver);
    }
  }
  /**
   * Applies each planned operation in order. Per operation: optional
   * idempotency skip (postchecks already satisfied), prechecks, execute
   * steps, postchecks. Returns the failure result of the first failing
   * operation; otherwise a summary of executed/skipped operations.
   * Each check family can be disabled via options.executionChecks.
   */
  async applyPlan(driver, options) {
    const checks = options.executionChecks;
    const runPrechecks = checks?.prechecks !== false;
    const runPostchecks = checks?.postchecks !== false;
    const runIdempotency = checks?.idempotencyChecks !== false;
    let operationsExecuted = 0;
    const executedOperations = [];
    for (const operation of options.plan.operations) {
      options.callbacks?.onOperationStart?.(operation);
      try {
        if (runPostchecks && runIdempotency) {
          // All postchecks already pass => the operation's effect is
          // present; record a skip entry instead of executing.
          if (await this.expectationsAreSatisfied(driver, operation.postcheck)) {
            executedOperations.push(this.createPostcheckPreSatisfiedSkipRecord(operation));
            continue;
          }
        }
        if (runPrechecks) {
          const precheckResult = await this.runExpectationSteps(driver, operation.precheck, operation, "precheck");
          if (!precheckResult.ok) return precheckResult;
        }
        const executeResult = await this.runExecuteSteps(driver, operation.execute, operation);
        if (!executeResult.ok) return executeResult;
        if (runPostchecks) {
          const postcheckResult = await this.runExpectationSteps(driver, operation.postcheck, operation, "postcheck");
          if (!postcheckResult.ok) return postcheckResult;
        }
        executedOperations.push(operation);
        operationsExecuted += 1;
      } finally {
        // Completion callback fires for every operation — executed,
        // skipped, or failed.
        options.callbacks?.onOperationComplete?.(operation);
      }
    }
    return ok({
      operationsExecuted,
      executedOperations
    });
  }
  // Creates the prisma_contract schema, marker and ledger tables (all
  // idempotent "if not exists" DDL).
  async ensureControlTables(driver) {
    await this.executeStatement(driver, ensurePrismaContractSchemaStatement);
    await this.executeStatement(driver, ensureMarkerTableStatement);
    await this.executeStatement(driver, ensureLedgerTableStatement);
  }
  /**
   * Runs each expectation step's SQL and requires a truthy first value
   * (see stepResultIsTrue). Returns a PRECHECK_FAILED / POSTCHECK_FAILED
   * runner failure on the first unsatisfied step.
   */
  async runExpectationSteps(driver, steps, operation, phase) {
    for (const step of steps) {
      const result = await driver.query(step.sql);
      if (!this.stepResultIsTrue(result.rows)) return runnerFailure(phase === "precheck" ? "PRECHECK_FAILED" : "POSTCHECK_FAILED", `Operation ${operation.id} failed during ${phase}: ${step.description}`, { meta: {
        operationId: operation.id,
        phase,
        stepDescription: step.description
      } });
    }
    return okVoid();
  }
  /**
   * Runs each execute step's SQL. Known SQL errors (SqlQueryError) are
   * converted into an EXECUTION_FAILED runner failure with diagnostic meta;
   * anything else is rethrown untouched.
   */
  async runExecuteSteps(driver, steps, operation) {
    for (const step of steps) try {
      await driver.query(step.sql);
    } catch (error) {
      if (SqlQueryError.is(error)) return runnerFailure("EXECUTION_FAILED", `Operation ${operation.id} failed during execution: ${step.description}`, {
        why: error.message,
        meta: {
          operationId: operation.id,
          stepDescription: step.description,
          sql: step.sql,
          sqlState: error.sqlState,
          constraint: error.constraint,
          table: error.table,
          column: error.column,
          detail: error.detail
        }
      });
      throw error;
    }
    return okVoid();
  }
  /**
   * Interprets the first column of the first row as a boolean, tolerating
   * driver-dependent representations: real booleans, numbers (non-zero is
   * true), and Postgres text forms ("t"/"true"/"1" vs "f"/"false"/"0");
   * any other non-empty string counts as true. No rows => false.
   */
  stepResultIsTrue(rows) {
    if (!rows || rows.length === 0) return false;
    const firstRow = rows[0];
    const firstValue = firstRow ? Object.values(firstRow)[0] : void 0;
    if (typeof firstValue === "boolean") return firstValue;
    if (typeof firstValue === "number") return firstValue !== 0;
    if (typeof firstValue === "string") {
      const lower = firstValue.toLowerCase();
      if (lower === "t" || lower === "true" || lower === "1") return true;
      if (lower === "f" || lower === "false" || lower === "0") return false;
      return firstValue.length > 0;
    }
    return Boolean(firstValue);
  }
  // True only when EVERY step evaluates truthy; an empty step list is
  // deliberately NOT considered satisfied (prevents skipping operations
  // that have no postchecks).
  async expectationsAreSatisfied(driver, steps) {
    if (steps.length === 0) return false;
    for (const step of steps) {
      const result = await driver.query(step.sql);
      if (!this.stepResultIsTrue(result.rows)) return false;
    }
    return true;
  }
  /**
   * Builds a frozen copy of a skipped operation for the ledger: precheck
   * and execute step lists are emptied, the postcheck list is kept, and
   * runner metadata ({ skipped: true, reason: "postcheck_pre_satisfied" })
   * is merged into the operation's meta.
   */
  createPostcheckPreSatisfiedSkipRecord(operation) {
    const clonedMeta = operation.meta ? cloneAndFreezeRecord(operation.meta) : void 0;
    const runnerMeta = Object.freeze({
      skipped: true,
      reason: "postcheck_pre_satisfied"
    });
    const mergedMeta = Object.freeze({
      ...clonedMeta ?? {},
      runner: runnerMeta
    });
    const frozenPostcheck = Object.freeze([...operation.postcheck]);
    return Object.freeze({
      id: operation.id,
      label: operation.label,
      ...ifDefined("summary", operation.summary),
      operationClass: operation.operationClass,
      target: operation.target,
      precheck: Object.freeze([]),
      execute: Object.freeze([]),
      postcheck: frozenPostcheck,
      // NOTE(review): `(operation.meta || mergedMeta)` is always truthy
      // because mergedMeta is a freshly built object, so this ternary always
      // yields mergedMeta. Presumably intentional (skip records always carry
      // runner meta), but confirm it was not meant to gate on operation.meta.
      ...ifDefined("meta", operation.meta || mergedMeta ? mergedMeta : void 0)
    });
  }
  // True when the marker already records the plan's destination hashes.
  // The profile hash is only compared when the plan specifies one.
  markerMatchesDestination(marker, plan) {
    if (!marker) return false;
    if (marker.storageHash !== plan.destination.storageHash) return false;
    if (plan.destination.profileHash && marker.profileHash !== plan.destination.profileHash) return false;
    return true;
  }
  // Rejects the plan if any operation's class is outside the policy's
  // allow-list.
  enforcePolicyCompatibility(policy, operations) {
    const allowedClasses = new Set(policy.allowedOperationClasses);
    for (const operation of operations) if (!allowedClasses.has(operation.operationClass)) return runnerFailure("POLICY_VIOLATION", `Operation ${operation.id} has class "${operation.operationClass}" which is not allowed by policy.`, {
      why: `Policy only allows: ${policy.allowedOperationClasses.join(", ")}.`,
      meta: {
        operationId: operation.id,
        operationClass: operation.operationClass,
        allowedClasses: policy.allowedOperationClasses
      }
    });
    return okVoid();
  }
  /**
   * Verifies the database's existing marker matches the plan's declared
   * origin. With no declared origin, either no marker must exist or the
   * marker must already be at the destination; otherwise the marker's
   * storage hash (and profile hash, when the origin specifies one) must
   * equal the origin's.
   */
  ensureMarkerCompatibility(marker, plan) {
    const origin = plan.origin ?? null;
    if (!origin) {
      if (!marker) return okVoid();
      if (this.markerMatchesDestination(marker, plan)) return okVoid();
      return runnerFailure("MARKER_ORIGIN_MISMATCH", `Existing contract marker (${marker.storageHash}) does not match plan origin (no marker expected).`, { meta: {
        markerStorageHash: marker.storageHash,
        expectedOrigin: null
      } });
    }
    if (!marker) return runnerFailure("MARKER_ORIGIN_MISMATCH", `Missing contract marker: expected origin storage hash ${origin.storageHash}.`, { meta: { expectedOriginStorageHash: origin.storageHash } });
    if (marker.storageHash !== origin.storageHash) return runnerFailure("MARKER_ORIGIN_MISMATCH", `Existing contract marker (${marker.storageHash}) does not match plan origin (${origin.storageHash}).`, { meta: {
      markerStorageHash: marker.storageHash,
      expectedOriginStorageHash: origin.storageHash
    } });
    if (origin.profileHash && marker.profileHash !== origin.profileHash) return runnerFailure("MARKER_ORIGIN_MISMATCH", `Existing contract marker profile hash (${marker.profileHash}) does not match plan origin profile hash (${origin.profileHash}).`, { meta: {
      markerProfileHash: marker.profileHash,
      expectedOriginProfileHash: origin.profileHash
    } });
    return okVoid();
  }
  // Guards against running a plan with a contract object other than the one
  // it was planned for (storage hash must match; profile hashes must match
  // when both sides declare one).
  ensurePlanMatchesDestinationContract(destination, contract) {
    if (destination.storageHash !== contract.storageHash) return runnerFailure("DESTINATION_CONTRACT_MISMATCH", `Plan destination storage hash (${destination.storageHash}) does not match provided contract storage hash (${contract.storageHash}).`, { meta: {
      planStorageHash: destination.storageHash,
      contractStorageHash: contract.storageHash
    } });
    if (destination.profileHash && contract.profileHash && destination.profileHash !== contract.profileHash) return runnerFailure("DESTINATION_CONTRACT_MISMATCH", `Plan destination profile hash (${destination.profileHash}) does not match provided contract profile hash (${contract.profileHash}).`, { meta: {
      planProfileHash: destination.profileHash,
      contractProfileHash: contract.profileHash
    } });
    return okVoid();
  }
  /**
   * Writes the marker row for the destination contract: insert on first run,
   * update afterwards. profileHash falls back from the plan destination to
   * the contract, then to the storage hash as a last resort.
   */
  async upsertMarker(driver, options, existingMarker) {
    const writeStatements = buildWriteMarkerStatements({
      storageHash: options.plan.destination.storageHash,
      profileHash: options.plan.destination.profileHash ?? options.destinationContract.profileHash ?? options.plan.destination.storageHash,
      contractJson: options.destinationContract,
      canonicalVersion: null,
      meta: {}
    });
    const statement = existingMarker ? writeStatements.update : writeStatements.insert;
    await this.executeStatement(driver, statement);
  }
  // Appends one ledger row for this transition; origin fields come from the
  // pre-existing marker (null on first run).
  async recordLedgerEntry(driver, options, existingMarker, executedOperations) {
    const ledgerStatement = buildLedgerInsertStatement({
      originStorageHash: existingMarker?.storageHash ?? null,
      originProfileHash: existingMarker?.profileHash ?? null,
      destinationStorageHash: options.plan.destination.storageHash,
      destinationProfileHash: options.plan.destination.profileHash ?? options.destinationContract.profileHash ?? options.plan.destination.storageHash,
      contractJsonBefore: existingMarker?.contractJson ?? null,
      contractJsonAfter: options.destinationContract,
      operations: executedOperations
    });
    await this.executeStatement(driver, ledgerStatement);
  }
  // Transaction-scoped advisory lock keyed by hashtext(key); auto-released
  // when the transaction ends.
  async acquireLock(driver, key) {
    await driver.query("select pg_advisory_xact_lock(hashtext($1))", [key]);
  }
  async beginTransaction(driver) {
    await driver.query("BEGIN");
  }
  async commitTransaction(driver) {
    await driver.query("COMMIT");
  }
  async rollbackTransaction(driver) {
    await driver.query("ROLLBACK");
  }
  // Dispatches a statement with or without parameters as appropriate.
  async executeStatement(driver, statement) {
    if (statement.params.length > 0) {
      await driver.query(statement.sql, statement.params);
      return;
    }
    await driver.query(statement.sql);
  }
};
|
|
2422
|
+
|
|
2423
|
+
//#endregion
|
|
2424
|
+
//#region src/exports/control.ts
|
|
2425
|
+
/**
 * Control-plane descriptor for the Postgres target. Extends the shared
 * descriptor metadata with factory entry points; the planner/runner
 * factories are exposed both at the top level and under `migrations`.
 */
const postgresTargetDescriptor = {
  ...postgresTargetDescriptorMeta,
  operationSignatures: () => [],
  migrations: {
    createPlanner: (_family) => createPostgresMigrationPlanner(),
    createRunner: (family) => createPostgresMigrationRunner(family)
  },
  create: () => ({
    familyId: "sql",
    targetId: "postgres"
  }),
  createPlanner: (_family) => createPostgresMigrationPlanner(),
  createRunner: (family) => createPostgresMigrationRunner(family)
};
var control_default = postgresTargetDescriptor;
|
|
2450
|
+
|
|
2451
|
+
//#endregion
|
|
2452
|
+
export { control_default as default };
|
|
2453
|
+
//# sourceMappingURL=control.mjs.map
|