@prisma-next/adapter-postgres 0.3.0-dev.6 → 0.3.0-dev.64

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91)
  1. package/LICENSE +201 -0
  2. package/README.md +64 -2
  3. package/dist/adapter-DtehReRR.mjs +271 -0
  4. package/dist/adapter-DtehReRR.mjs.map +1 -0
  5. package/dist/adapter.d.mts +23 -0
  6. package/dist/adapter.d.mts.map +1 -0
  7. package/dist/adapter.mjs +5 -0
  8. package/dist/codec-ids-Bsm9c7ns.mjs +29 -0
  9. package/dist/codec-ids-Bsm9c7ns.mjs.map +1 -0
  10. package/dist/codec-types.d.mts +141 -0
  11. package/dist/codec-types.d.mts.map +1 -0
  12. package/dist/codec-types.mjs +4 -0
  13. package/dist/codecs-BfC_5c-4.mjs +207 -0
  14. package/dist/codecs-BfC_5c-4.mjs.map +1 -0
  15. package/dist/column-types.d.mts +110 -0
  16. package/dist/column-types.d.mts.map +1 -0
  17. package/dist/column-types.mjs +180 -0
  18. package/dist/column-types.mjs.map +1 -0
  19. package/dist/control.d.mts +111 -0
  20. package/dist/control.d.mts.map +1 -0
  21. package/dist/control.mjs +463 -0
  22. package/dist/control.mjs.map +1 -0
  23. package/dist/descriptor-meta-ilnFI7bx.mjs +921 -0
  24. package/dist/descriptor-meta-ilnFI7bx.mjs.map +1 -0
  25. package/dist/runtime.d.mts +19 -0
  26. package/dist/runtime.d.mts.map +1 -0
  27. package/dist/runtime.mjs +85 -0
  28. package/dist/runtime.mjs.map +1 -0
  29. package/dist/sql-utils-CSfAGEwF.mjs +78 -0
  30. package/dist/sql-utils-CSfAGEwF.mjs.map +1 -0
  31. package/dist/types-CXO7EB60.d.mts +19 -0
  32. package/dist/types-CXO7EB60.d.mts.map +1 -0
  33. package/dist/types.d.mts +2 -0
  34. package/dist/types.mjs +1 -0
  35. package/package.json +37 -46
  36. package/src/core/adapter.ts +139 -28
  37. package/src/core/codec-ids.ts +28 -0
  38. package/src/core/codecs.ts +325 -23
  39. package/src/core/control-adapter.ts +400 -178
  40. package/src/core/default-normalizer.ts +90 -0
  41. package/src/core/descriptor-meta.ts +221 -9
  42. package/src/core/enum-control-hooks.ts +735 -0
  43. package/src/core/json-schema-type-expression.ts +131 -0
  44. package/src/core/json-schema-validator.ts +53 -0
  45. package/src/core/parameterized-types.ts +118 -0
  46. package/src/core/sql-utils.ts +111 -0
  47. package/src/core/standard-schema.ts +71 -0
  48. package/src/exports/codec-types.ts +73 -1
  49. package/src/exports/column-types.ts +233 -9
  50. package/src/exports/control.ts +16 -9
  51. package/src/exports/runtime.ts +61 -18
  52. package/dist/chunk-HD5YISNQ.js +0 -47
  53. package/dist/chunk-HD5YISNQ.js.map +0 -1
  54. package/dist/chunk-J3XSOAM2.js +0 -162
  55. package/dist/chunk-J3XSOAM2.js.map +0 -1
  56. package/dist/chunk-T6S3A6VT.js +0 -301
  57. package/dist/chunk-T6S3A6VT.js.map +0 -1
  58. package/dist/core/adapter.d.ts +0 -19
  59. package/dist/core/adapter.d.ts.map +0 -1
  60. package/dist/core/codecs.d.ts +0 -110
  61. package/dist/core/codecs.d.ts.map +0 -1
  62. package/dist/core/control-adapter.d.ts +0 -33
  63. package/dist/core/control-adapter.d.ts.map +0 -1
  64. package/dist/core/descriptor-meta.d.ts +0 -72
  65. package/dist/core/descriptor-meta.d.ts.map +0 -1
  66. package/dist/core/types.d.ts +0 -16
  67. package/dist/core/types.d.ts.map +0 -1
  68. package/dist/exports/adapter.d.ts +0 -2
  69. package/dist/exports/adapter.d.ts.map +0 -1
  70. package/dist/exports/adapter.js +0 -8
  71. package/dist/exports/adapter.js.map +0 -1
  72. package/dist/exports/codec-types.d.ts +0 -11
  73. package/dist/exports/codec-types.d.ts.map +0 -1
  74. package/dist/exports/codec-types.js +0 -7
  75. package/dist/exports/codec-types.js.map +0 -1
  76. package/dist/exports/column-types.d.ts +0 -17
  77. package/dist/exports/column-types.d.ts.map +0 -1
  78. package/dist/exports/column-types.js +0 -49
  79. package/dist/exports/column-types.js.map +0 -1
  80. package/dist/exports/control.d.ts +0 -8
  81. package/dist/exports/control.d.ts.map +0 -1
  82. package/dist/exports/control.js +0 -279
  83. package/dist/exports/control.js.map +0 -1
  84. package/dist/exports/runtime.d.ts +0 -15
  85. package/dist/exports/runtime.d.ts.map +0 -1
  86. package/dist/exports/runtime.js +0 -20
  87. package/dist/exports/runtime.js.map +0 -1
  88. package/dist/exports/types.d.ts +0 -2
  89. package/dist/exports/types.d.ts.map +0 -1
  90. package/dist/exports/types.js +0 -1
  91. package/dist/exports/types.js.map +0 -1
@@ -0,0 +1,735 @@
1
+ import type { CodecControlHooks, SqlMigrationPlanOperation } from '@prisma-next/family-sql/control';
2
+ import { arraysEqual } from '@prisma-next/family-sql/schema-verify';
3
+ import type { SqlContract, SqlStorage, StorageTypeInstance } from '@prisma-next/sql-contract/types';
4
+ import type { SqlSchemaIR } from '@prisma-next/sql-schema-ir/types';
5
+ import { PG_ENUM_CODEC_ID } from './codec-ids';
6
+ import { escapeLiteral, qualifyName, quoteIdentifier, validateEnumValueLength } from './sql-utils';
7
+
8
+ /**
9
+ * Postgres enum control hooks.
10
+ *
11
+ * - Plans enum type operations for migrations
12
+ * - Verifies enum types in schema IR
13
+ * - Introspects enum types from the database
14
+ */
15
+ type EnumRow = {
16
+ schema_name: string;
17
+ type_name: string;
18
+ values: string[];
19
+ };
20
+
21
+ type EnumDiff =
22
+ | { kind: 'unchanged' }
23
+ | { kind: 'add_values'; values: readonly string[] }
24
+ | { kind: 'rebuild'; removedValues: readonly string[] };
25
+
26
+ // ============================================================================
27
+ // Introspection SQL
28
+ // ============================================================================
29
+
30
+ const ENUM_INTROSPECT_QUERY = `
31
+ SELECT
32
+ n.nspname AS schema_name,
33
+ t.typname AS type_name,
34
+ array_agg(e.enumlabel ORDER BY e.enumsortorder) AS values
35
+ FROM pg_type t
36
+ JOIN pg_namespace n ON t.typnamespace = n.oid
37
+ JOIN pg_enum e ON t.oid = e.enumtypid
38
+ WHERE n.nspname = $1
39
+ GROUP BY n.nspname, t.typname
40
+ ORDER BY n.nspname, t.typname
41
+ `;
42
+
43
+ // ============================================================================
44
+ // Schema Helpers (Simplified)
45
+ // ============================================================================
46
+
47
+ /**
48
+ * Type guard for string arrays. Used for runtime validation of introspected data.
49
+ */
50
+ function isStringArray(value: unknown): value is string[] {
51
+ return Array.isArray(value) && value.every((entry) => typeof entry === 'string');
52
+ }
53
+
54
+ /**
55
+ * Parses a PostgreSQL array value into a JavaScript string array.
56
+ *
57
+ * PostgreSQL's `pg` library may return `array_agg` results either as:
58
+ * - A JavaScript array (when type parsers are configured)
59
+ * - A string in PostgreSQL array literal format: `{value1,value2,...}`
60
+ *
61
+ * Handles PostgreSQL's quoting rules for array elements:
62
+ * - Elements containing commas, double quotes, backslashes, or whitespace are double-quoted
63
+ * - Inside quoted elements, `\"` represents `"` and `\\` represents `\`
64
+ *
65
+ * @param value - The value to parse (array or PostgreSQL array string)
66
+ * @returns A string array, or null if the value cannot be parsed
67
+ */
68
+ export function parsePostgresArray(value: unknown): string[] | null {
69
+ if (isStringArray(value)) {
70
+ return value;
71
+ }
72
+ if (typeof value === 'string' && value.startsWith('{') && value.endsWith('}')) {
73
+ const inner = value.slice(1, -1);
74
+ if (inner === '') {
75
+ return [];
76
+ }
77
+ return parseArrayElements(inner);
78
+ }
79
+ return null;
80
+ }
81
+
82
+ function parseArrayElements(input: string): string[] {
83
+ const result: string[] = [];
84
+ let i = 0;
85
+ while (i < input.length) {
86
+ if (input[i] === ',') {
87
+ i++;
88
+ continue;
89
+ }
90
+ if (input[i] === '"') {
91
+ i++;
92
+ let element = '';
93
+ while (i < input.length && input[i] !== '"') {
94
+ if (input[i] === '\\' && i + 1 < input.length) {
95
+ i++;
96
+ element += input[i];
97
+ } else {
98
+ element += input[i];
99
+ }
100
+ i++;
101
+ }
102
+ i++;
103
+ result.push(element);
104
+ } else {
105
+ const nextComma = input.indexOf(',', i);
106
+ if (nextComma === -1) {
107
+ result.push(input.slice(i).trim());
108
+ i = input.length;
109
+ } else {
110
+ result.push(input.slice(i, nextComma).trim());
111
+ i = nextComma;
112
+ }
113
+ }
114
+ }
115
+ return result;
116
+ }
117
+
118
+ /**
119
+ * Extracts enum values from a StorageTypeInstance.
120
+ * Returns null if values are missing or invalid.
121
+ */
122
+ function getEnumValues(typeInstance: StorageTypeInstance): readonly string[] | null {
123
+ const values = typeInstance.typeParams?.['values'];
124
+ return isStringArray(values) ? values : null;
125
+ }
126
+
127
+ /**
128
+ * Reads existing enum values from the schema IR for a given native type.
129
+ * Uses optional chaining to simplify navigation through the annotations structure.
130
+ */
131
+ function readExistingEnumValues(schema: SqlSchemaIR, nativeType: string): readonly string[] | null {
132
+ // Schema annotations.pg.storageTypes is populated by introspection
133
+ const storageTypes = (schema.annotations?.['pg'] as Record<string, unknown> | undefined)?.[
134
+ 'storageTypes'
135
+ ] as Record<string, StorageTypeInstance> | undefined;
136
+
137
+ const existing = storageTypes?.[nativeType];
138
+ if (!existing || existing.codecId !== PG_ENUM_CODEC_ID) {
139
+ return null;
140
+ }
141
+ return getEnumValues(existing);
142
+ }
143
+
144
+ /**
145
+ * Determines what changes are needed to transform existing enum values to desired values.
146
+ *
147
+ * Returns one of:
148
+ * - `unchanged`: No changes needed, values match exactly
149
+ * - `add_values`: New values can be safely appended (PostgreSQL supports this)
150
+ * - `rebuild`: Full enum rebuild required (value removal, reordering, or both)
151
+ *
152
+ * Note: PostgreSQL enums can only have values added (not removed or reordered) without
153
+ * a full type rebuild involving temp type creation and column migration.
154
+ *
155
+ * @param existing - Current enum values in the database
156
+ * @param desired - Target enum values from the contract
157
+ * @returns The type of change required
158
+ */
159
+ function determineEnumDiff(existing: readonly string[], desired: readonly string[]): EnumDiff {
160
+ if (arraysEqual(existing, desired)) {
161
+ return { kind: 'unchanged' };
162
+ }
163
+
164
+ // Use Sets for O(1) lookups instead of O(n) array.includes()
165
+ const existingSet = new Set(existing);
166
+ const desiredSet = new Set(desired);
167
+
168
+ const missingValues = desired.filter((value) => !existingSet.has(value));
169
+ const removedValues = existing.filter((value) => !desiredSet.has(value));
170
+ const orderMismatch =
171
+ missingValues.length === 0 && removedValues.length === 0 && !arraysEqual(existing, desired);
172
+
173
+ if (removedValues.length > 0 || orderMismatch) {
174
+ return { kind: 'rebuild', removedValues };
175
+ }
176
+
177
+ return { kind: 'add_values', values: missingValues };
178
+ }
179
+
180
+ // ============================================================================
181
+ // SQL Helpers
182
+ // ============================================================================
183
+
184
+ function enumTypeExistsCheck(schemaName: string, typeName: string, exists = true): string {
185
+ const existsClause = exists ? 'EXISTS' : 'NOT EXISTS';
186
+ return `SELECT ${existsClause} (
187
+ SELECT 1
188
+ FROM pg_type t
189
+ JOIN pg_namespace n ON t.typnamespace = n.oid
190
+ WHERE n.nspname = '${escapeLiteral(schemaName)}'
191
+ AND t.typname = '${escapeLiteral(typeName)}'
192
+ )`;
193
+ }
194
+
195
+ // ============================================================================
196
+ // Operation Builders
197
+ // ============================================================================
198
+
199
+ function buildCreateEnumOperation(
200
+ typeName: string,
201
+ nativeType: string,
202
+ schemaName: string,
203
+ values: readonly string[],
204
+ ): SqlMigrationPlanOperation<unknown> {
205
+ // Validate all enum values don't exceed PostgreSQL's label length limit
206
+ for (const value of values) {
207
+ validateEnumValueLength(value, typeName);
208
+ }
209
+ const literalValues = values.map((value) => `'${escapeLiteral(value)}'`).join(', ');
210
+ const qualifiedType = qualifyName(schemaName, nativeType);
211
+ return {
212
+ id: `type.${typeName}`,
213
+ label: `Create type ${typeName}`,
214
+ summary: `Creates enum type ${typeName}`,
215
+ operationClass: 'additive',
216
+ target: { id: 'postgres' },
217
+ precheck: [
218
+ {
219
+ description: `ensure type "${nativeType}" does not exist`,
220
+ sql: enumTypeExistsCheck(schemaName, nativeType, false),
221
+ },
222
+ ],
223
+ execute: [
224
+ {
225
+ description: `create type "${nativeType}"`,
226
+ sql: `CREATE TYPE ${qualifiedType} AS ENUM (${literalValues})`,
227
+ },
228
+ ],
229
+ postcheck: [
230
+ {
231
+ description: `verify type "${nativeType}" exists`,
232
+ sql: enumTypeExistsCheck(schemaName, nativeType),
233
+ },
234
+ ],
235
+ };
236
+ }
237
+
238
+ /**
239
+ * Computes the optimal position for inserting a new enum value to maintain
240
+ * the desired order relative to existing values.
241
+ *
242
+ * PostgreSQL's `ALTER TYPE ADD VALUE` supports BEFORE/AFTER positioning.
243
+ * This function finds the best reference value by:
244
+ * 1. Looking for the nearest preceding value that already exists
245
+ * 2. Falling back to the nearest following value if no preceding exists
246
+ * 3. Defaulting to end-of-list if no reference is found
247
+ *
248
+ * @param options.desired - The target ordered list of all enum values
249
+ * @param options.desiredIndex - Index of the value being inserted in the desired list
250
+ * @param options.current - Current list of enum values (being built up incrementally)
251
+ * @returns SQL clause (e.g., " AFTER 'x'") and insert position for tracking
252
+ */
253
+ function computeInsertPosition(options: {
254
+ desired: readonly string[];
255
+ desiredIndex: number;
256
+ current: readonly string[];
257
+ }): { clause: string; insertAt: number } {
258
+ const { desired, desiredIndex, current } = options;
259
+ const currentSet = new Set(current);
260
+ const previous = desired
261
+ .slice(0, desiredIndex)
262
+ .reverse()
263
+ .find((candidate) => currentSet.has(candidate));
264
+ const next = desired.slice(desiredIndex + 1).find((candidate) => currentSet.has(candidate));
265
+ const clause = previous
266
+ ? ` AFTER '${escapeLiteral(previous)}'`
267
+ : next
268
+ ? ` BEFORE '${escapeLiteral(next)}'`
269
+ : '';
270
+ const insertAt = previous
271
+ ? current.indexOf(previous) + 1
272
+ : next
273
+ ? current.indexOf(next)
274
+ : current.length;
275
+
276
+ return { clause, insertAt };
277
+ }
278
+
279
+ /**
280
+ * Builds operations to add new enum values to an existing PostgreSQL enum type.
281
+ *
282
+ * Each new value is added with `ALTER TYPE ... ADD VALUE IF NOT EXISTS` for idempotency.
283
+ * Values are inserted in the correct order using BEFORE/AFTER positioning to match
284
+ * the desired final order.
285
+ *
286
+ * This is a safe, non-destructive operation - existing data is not affected.
287
+ *
288
+ * @param options.typeName - Contract-level type name (e.g., 'Role')
289
+ * @param options.nativeType - PostgreSQL type name (e.g., 'role')
290
+ * @param options.schemaName - PostgreSQL schema (e.g., 'public')
291
+ * @param options.desired - Target ordered list of all enum values
292
+ * @param options.existing - Current enum values in the database
293
+ * @returns Array of migration operations to add each missing value
294
+ */
295
+ function buildAddValueOperations(options: {
296
+ typeName: string;
297
+ nativeType: string;
298
+ schemaName: string;
299
+ desired: readonly string[];
300
+ existing: readonly string[];
301
+ }): SqlMigrationPlanOperation<unknown>[] {
302
+ const { typeName, nativeType, schemaName } = options;
303
+ const current = [...options.existing];
304
+ const currentSet = new Set(current);
305
+ const operations: SqlMigrationPlanOperation<unknown>[] = [];
306
+ for (let index = 0; index < options.desired.length; index += 1) {
307
+ const value = options.desired[index];
308
+ if (value === undefined) {
309
+ continue;
310
+ }
311
+ if (currentSet.has(value)) {
312
+ continue;
313
+ }
314
+ // Validate the new value doesn't exceed PostgreSQL's label length limit
315
+ validateEnumValueLength(value, typeName);
316
+ const { clause, insertAt } = computeInsertPosition({
317
+ desired: options.desired,
318
+ desiredIndex: index,
319
+ current,
320
+ });
321
+ // Use IF NOT EXISTS for idempotency - safe to re-run after partial failures.
322
+ // Supported in PostgreSQL 9.3+, and we require PostgreSQL 12+.
323
+ operations.push({
324
+ id: `type.${typeName}.value.${value}`,
325
+ label: `Add value ${value} to ${typeName}`,
326
+ summary: `Adds enum value ${value} to ${typeName}`,
327
+ operationClass: 'widening',
328
+ target: { id: 'postgres' },
329
+ precheck: [],
330
+ execute: [
331
+ {
332
+ description: `add value "${value}" if not exists`,
333
+ sql: `ALTER TYPE ${qualifyName(schemaName, nativeType)} ADD VALUE IF NOT EXISTS '${escapeLiteral(
334
+ value,
335
+ )}'${clause}`,
336
+ },
337
+ ],
338
+ postcheck: [],
339
+ });
340
+ current.splice(insertAt, 0, value);
341
+ currentSet.add(value);
342
+ }
343
+ return operations;
344
+ }
345
+
346
+ /**
347
+ * Collects columns using the enum type from the contract (desired state).
348
+ * Used for type-safe reference tracking.
349
+ */
350
+ function collectEnumColumnsFromContract(
351
+ contract: SqlContract<SqlStorage>,
352
+ typeName: string,
353
+ nativeType: string,
354
+ ): ReadonlyArray<{ table: string; column: string }> {
355
+ const columns: Array<{ table: string; column: string }> = [];
356
+ for (const [tableName, table] of Object.entries(contract.storage.tables)) {
357
+ for (const [columnName, column] of Object.entries(table.columns)) {
358
+ if (
359
+ column.typeRef === typeName ||
360
+ (column.nativeType === nativeType && column.codecId === PG_ENUM_CODEC_ID)
361
+ ) {
362
+ columns.push({ table: tableName, column: columnName });
363
+ }
364
+ }
365
+ }
366
+ return columns;
367
+ }
368
+
369
+ /**
370
+ * Collects columns using the enum type from the schema IR (live database state).
371
+ * This ensures we find ALL dependent columns, including those added outside the contract
372
+ * (e.g., manual DDL), which is critical for safe enum rebuild operations.
373
+ */
374
+ function collectEnumColumnsFromSchema(
375
+ schema: SqlSchemaIR,
376
+ nativeType: string,
377
+ ): ReadonlyArray<{ table: string; column: string }> {
378
+ const columns: Array<{ table: string; column: string }> = [];
379
+ for (const [tableName, table] of Object.entries(schema.tables)) {
380
+ for (const [columnName, column] of Object.entries(table.columns)) {
381
+ // Match by nativeType since schema IR doesn't have codecId/typeRef
382
+ if (column.nativeType === nativeType) {
383
+ columns.push({ table: tableName, column: columnName });
384
+ }
385
+ }
386
+ }
387
+ return columns;
388
+ }
389
+
390
+ /**
391
+ * Collects all columns using the enum type from both contract AND live database.
392
+ * Merges and deduplicates to ensure we migrate ALL dependent columns during rebuild.
393
+ *
394
+ * This is critical for data integrity: if a column exists in the database using
395
+ * this enum but is not in the contract (e.g., added via manual DDL), we must
396
+ * still migrate it to avoid DROP TYPE failures.
397
+ */
398
+ function collectAllEnumColumns(
399
+ contract: SqlContract<SqlStorage>,
400
+ schema: SqlSchemaIR,
401
+ typeName: string,
402
+ nativeType: string,
403
+ ): ReadonlyArray<{ table: string; column: string }> {
404
+ const contractColumns = collectEnumColumnsFromContract(contract, typeName, nativeType);
405
+ const schemaColumns = collectEnumColumnsFromSchema(schema, nativeType);
406
+
407
+ // Merge and deduplicate using a Set of "table.column" keys
408
+ const seen = new Set<string>();
409
+ const result: Array<{ table: string; column: string }> = [];
410
+
411
+ for (const col of [...contractColumns, ...schemaColumns]) {
412
+ const key = `${col.table}.${col.column}`;
413
+ if (!seen.has(key)) {
414
+ seen.add(key);
415
+ result.push(col);
416
+ }
417
+ }
418
+
419
+ // Sort for deterministic operation order
420
+ return result.sort((a, b) => {
421
+ const tableCompare = a.table.localeCompare(b.table);
422
+ return tableCompare !== 0 ? tableCompare : a.column.localeCompare(b.column);
423
+ });
424
+ }
425
+
426
+ /**
427
+ * Builds a SQL check to verify a column's type matches an expected type.
428
+ */
429
+ function columnTypeCheck(options: {
430
+ schemaName: string;
431
+ tableName: string;
432
+ columnName: string;
433
+ expectedType: string;
434
+ }): string {
435
+ return `SELECT EXISTS (
436
+ SELECT 1
437
+ FROM information_schema.columns
438
+ WHERE table_schema = '${escapeLiteral(options.schemaName)}'
439
+ AND table_name = '${escapeLiteral(options.tableName)}'
440
+ AND column_name = '${escapeLiteral(options.columnName)}'
441
+ AND udt_name = '${escapeLiteral(options.expectedType)}'
442
+ )`;
443
+ }
444
+
445
+ /** PostgreSQL maximum identifier length (NAMEDATALEN - 1) */
446
+ const MAX_IDENTIFIER_LENGTH = 63;
447
+
448
+ /** Suffix added to enum type names during rebuild operations */
449
+ const REBUILD_SUFFIX = '__pn_rebuild';
450
+
451
+ /**
452
+ * Builds an SQL check to verify no rows contain any of the removed enum values.
453
+ * This prevents data loss during enum rebuild operations.
454
+ *
455
+ * @param schemaName - PostgreSQL schema name
456
+ * @param tableName - Table containing the enum column
457
+ * @param columnName - Column using the enum type
458
+ * @param removedValues - Array of enum values being removed
459
+ * @returns SQL query that returns true if no rows contain removed values
460
+ */
461
+ function noRemovedValuesExistCheck(
462
+ schemaName: string,
463
+ tableName: string,
464
+ columnName: string,
465
+ removedValues: readonly string[],
466
+ ): string {
467
+ if (removedValues.length === 0) {
468
+ // No values being removed, always passes
469
+ return 'SELECT true';
470
+ }
471
+ const valuesList = removedValues.map((v) => `'${escapeLiteral(v)}'`).join(', ');
472
+ return `SELECT NOT EXISTS (
473
+ SELECT 1 FROM ${qualifyName(schemaName, tableName)}
474
+ WHERE ${quoteIdentifier(columnName)}::text IN (${valuesList})
475
+ LIMIT 1
476
+ )`;
477
+ }
478
+
479
+ /**
480
+ * Builds a migration operation to recreate a PostgreSQL enum type with updated values.
481
+ *
482
+ * This is required when:
483
+ * - Enum values are removed (PostgreSQL doesn't support direct removal)
484
+ * - Enum values are reordered (PostgreSQL doesn't support reordering)
485
+ *
486
+ * The operation:
487
+ * 1. Creates a new enum type with the desired values (temp name)
488
+ * 2. Migrates all columns to use the new type via text cast
489
+ * 3. Drops the original type
490
+ * 4. Renames the temp type to the original name
491
+ *
492
+ * IMPORTANT: If values are being removed and data exists using those values,
493
+ * the operation will fail at the precheck stage with a clear error message.
494
+ * This prevents silent data loss.
495
+ *
496
+ * @param options.typeName - Contract-level type name
497
+ * @param options.nativeType - PostgreSQL type name
498
+ * @param options.schemaName - PostgreSQL schema
499
+ * @param options.values - Desired final enum values
500
+ * @param options.removedValues - Values being removed (for data loss checks)
501
+ * @param options.contract - Full contract for column discovery
502
+ * @param options.schema - Current schema IR for column discovery
503
+ * @returns Migration operation for full enum rebuild
504
+ */
505
+ function buildRecreateEnumOperation(options: {
506
+ typeName: string;
507
+ nativeType: string;
508
+ schemaName: string;
509
+ values: readonly string[];
510
+ removedValues: readonly string[];
511
+ contract: SqlContract<SqlStorage>;
512
+ schema: SqlSchemaIR;
513
+ }): SqlMigrationPlanOperation<unknown> {
514
+ const tempTypeName = `${options.nativeType}${REBUILD_SUFFIX}`;
515
+
516
+ // Validate temp type name length won't exceed PostgreSQL's 63-character limit.
517
+ // If it would, PostgreSQL silently truncates which could cause conflicts.
518
+ if (tempTypeName.length > MAX_IDENTIFIER_LENGTH) {
519
+ const maxBaseLength = MAX_IDENTIFIER_LENGTH - REBUILD_SUFFIX.length;
520
+ throw new Error(
521
+ `Enum type name "${options.nativeType}" is too long for rebuild operation. ` +
522
+ `Maximum length is ${maxBaseLength} characters (type name + "${REBUILD_SUFFIX}" suffix ` +
523
+ `must fit within PostgreSQL's ${MAX_IDENTIFIER_LENGTH}-character identifier limit).`,
524
+ );
525
+ }
526
+
527
+ const qualifiedOriginal = qualifyName(options.schemaName, options.nativeType);
528
+ const qualifiedTemp = qualifyName(options.schemaName, tempTypeName);
529
+ const literalValues = options.values.map((value) => `'${escapeLiteral(value)}'`).join(', ');
530
+
531
+ // CRITICAL: Collect columns from BOTH contract AND live database.
532
+ // This ensures we migrate ALL dependent columns, including those added
533
+ // outside of Prisma Next (e.g., manual DDL). Without this, DROP TYPE
534
+ // would fail if the database has columns not tracked in the contract.
535
+ const columnRefs = collectAllEnumColumns(
536
+ options.contract,
537
+ options.schema,
538
+ options.typeName,
539
+ options.nativeType,
540
+ );
541
+
542
+ const alterColumns = columnRefs.map((ref) => ({
543
+ description: `alter ${ref.table}.${ref.column} to ${tempTypeName}`,
544
+ sql: `ALTER TABLE ${qualifyName(options.schemaName, ref.table)}
545
+ ALTER COLUMN ${quoteIdentifier(ref.column)}
546
+ TYPE ${qualifiedTemp}
547
+ USING ${quoteIdentifier(ref.column)}::text::${qualifiedTemp}`,
548
+ }));
549
+
550
+ // Build postchecks to verify:
551
+ // 1. The final type exists with the correct name
552
+ // 2. The temp type was cleaned up (renamed away)
553
+ // 3. All migrated columns now reference the final type
554
+ const postchecks = [
555
+ {
556
+ description: `verify type "${options.nativeType}" exists`,
557
+ sql: enumTypeExistsCheck(options.schemaName, options.nativeType),
558
+ },
559
+ {
560
+ description: `verify temp type "${tempTypeName}" was removed`,
561
+ sql: enumTypeExistsCheck(options.schemaName, tempTypeName, false),
562
+ },
563
+ // Verify each column was successfully migrated to the final type
564
+ ...columnRefs.map((ref) => ({
565
+ description: `verify ${ref.table}.${ref.column} uses type "${options.nativeType}"`,
566
+ sql: columnTypeCheck({
567
+ schemaName: options.schemaName,
568
+ tableName: ref.table,
569
+ columnName: ref.column,
570
+ expectedType: options.nativeType,
571
+ }),
572
+ })),
573
+ ];
574
+
575
+ return {
576
+ id: `type.${options.typeName}.rebuild`,
577
+ label: `Rebuild type ${options.typeName}`,
578
+ summary: `Recreates enum type ${options.typeName} with updated values`,
579
+ operationClass: 'destructive',
580
+ target: { id: 'postgres' },
581
+ precheck: [
582
+ {
583
+ description: `ensure type "${options.nativeType}" exists`,
584
+ sql: enumTypeExistsCheck(options.schemaName, options.nativeType),
585
+ },
586
+ // Note: We don't precheck that temp type doesn't exist because we handle
587
+ // orphaned temp types in the execute step below.
588
+
589
+ // CRITICAL: If values are being removed, verify no data exists using those values.
590
+ // This prevents silent data loss during the rebuild - the USING cast would fail
591
+ // at runtime if rows contain values that don't exist in the new enum.
592
+ ...(options.removedValues.length > 0
593
+ ? columnRefs.map((ref) => ({
594
+ description: `ensure no rows in ${ref.table}.${ref.column} contain removed values (${options.removedValues.join(', ')})`,
595
+ sql: noRemovedValuesExistCheck(
596
+ options.schemaName,
597
+ ref.table,
598
+ ref.column,
599
+ options.removedValues,
600
+ ),
601
+ }))
602
+ : []),
603
+ ],
604
+ execute: [
605
+ // Clean up any orphaned temp type from a previous failed migration.
606
+ // This makes the operation recoverable without manual intervention.
607
+ // DROP TYPE IF EXISTS is safe - it's a no-op if the type doesn't exist.
608
+ {
609
+ description: `drop orphaned temp type "${tempTypeName}" if exists`,
610
+ sql: `DROP TYPE IF EXISTS ${qualifiedTemp}`,
611
+ },
612
+ {
613
+ description: `create temp type "${tempTypeName}"`,
614
+ sql: `CREATE TYPE ${qualifiedTemp} AS ENUM (${literalValues})`,
615
+ },
616
+ ...alterColumns,
617
+ {
618
+ description: `drop type "${options.nativeType}"`,
619
+ sql: `DROP TYPE ${qualifiedOriginal}`,
620
+ },
621
+ {
622
+ description: `rename type "${tempTypeName}" to "${options.nativeType}"`,
623
+ sql: `ALTER TYPE ${qualifiedTemp} RENAME TO ${quoteIdentifier(options.nativeType)}`,
624
+ },
625
+ ],
626
+ postcheck: postchecks,
627
+ };
628
+ }
629
+
630
+ // ============================================================================
631
+ // Codec Control Hooks
632
+ // ============================================================================
633
+
634
+ /**
635
+ * Postgres enum hooks for planning, verifying, and introspecting `storage.types`.
636
+ */
637
+ export const pgEnumControlHooks: CodecControlHooks = {
638
+ planTypeOperations: ({ typeName, typeInstance, contract, schema, schemaName }) => {
639
+ const desired = getEnumValues(typeInstance);
640
+ if (!desired || desired.length === 0) {
641
+ return { operations: [] };
642
+ }
643
+
644
+ const schemaNamespace = schemaName ?? 'public';
645
+ const existing = readExistingEnumValues(schema, typeInstance.nativeType);
646
+ if (!existing) {
647
+ return {
648
+ operations: [
649
+ buildCreateEnumOperation(typeName, typeInstance.nativeType, schemaNamespace, desired),
650
+ ],
651
+ };
652
+ }
653
+
654
+ const diff = determineEnumDiff(existing, desired);
655
+ if (diff.kind === 'unchanged') {
656
+ return { operations: [] };
657
+ }
658
+
659
+ if (diff.kind === 'rebuild') {
660
+ return {
661
+ operations: [
662
+ buildRecreateEnumOperation({
663
+ typeName,
664
+ nativeType: typeInstance.nativeType,
665
+ schemaName: schemaNamespace,
666
+ values: desired,
667
+ removedValues: diff.removedValues,
668
+ contract,
669
+ schema,
670
+ }),
671
+ ],
672
+ };
673
+ }
674
+
675
+ return {
676
+ operations: buildAddValueOperations({
677
+ typeName,
678
+ nativeType: typeInstance.nativeType,
679
+ schemaName: schemaNamespace,
680
+ desired,
681
+ existing,
682
+ }),
683
+ };
684
+ },
685
+ verifyType: ({ typeName, typeInstance, schema }) => {
686
+ const desired = getEnumValues(typeInstance);
687
+ if (!desired) {
688
+ return [];
689
+ }
690
+ const existing = readExistingEnumValues(schema, typeInstance.nativeType);
691
+ if (!existing) {
692
+ return [
693
+ {
694
+ kind: 'type_missing',
695
+ table: '',
696
+ typeName,
697
+ message: `Type "${typeName}" is missing from database`,
698
+ },
699
+ ];
700
+ }
701
+ if (!arraysEqual(existing, desired)) {
702
+ return [
703
+ {
704
+ kind: 'type_values_mismatch',
705
+ table: '',
706
+ typeName,
707
+ expected: desired.join(', '),
708
+ actual: existing.join(', '),
709
+ message: `Type "${typeName}" values do not match contract`,
710
+ },
711
+ ];
712
+ }
713
+ return [];
714
+ },
715
+ introspectTypes: async ({ driver, schemaName }) => {
716
+ const namespace = schemaName ?? 'public';
717
+ const result = await driver.query<EnumRow>(ENUM_INTROSPECT_QUERY, [namespace]);
718
+ const types: Record<string, StorageTypeInstance> = {};
719
+ for (const row of result.rows) {
720
+ const values = parsePostgresArray(row.values);
721
+ if (!values) {
722
+ throw new Error(
723
+ `Failed to parse enum values for type "${row.type_name}": ` +
724
+ `unexpected format: ${JSON.stringify(row.values)}`,
725
+ );
726
+ }
727
+ types[row.type_name] = {
728
+ codecId: PG_ENUM_CODEC_ID,
729
+ nativeType: row.type_name,
730
+ typeParams: { values },
731
+ };
732
+ }
733
+ return types;
734
+ },
735
+ };