@prisma-next/migration-tools 0.5.0-dev.62 → 0.5.0-dev.63
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{constants-BQEHsaEx.mjs → constants-B87kJAGj.mjs} +1 -1
- package/dist/{constants-BQEHsaEx.mjs.map → constants-B87kJAGj.mjs.map} +1 -1
- package/dist/{errors-CfmjBeK0.mjs → errors-DQsXvidG.mjs} +22 -2
- package/dist/errors-DQsXvidG.mjs.map +1 -0
- package/dist/exports/constants.mjs +1 -1
- package/dist/exports/errors.d.mts.map +1 -1
- package/dist/exports/errors.mjs +1 -1
- package/dist/exports/graph.d.mts +1 -1
- package/dist/exports/hash.d.mts +3 -3
- package/dist/exports/hash.d.mts.map +1 -1
- package/dist/exports/hash.mjs +1 -1
- package/dist/exports/invariants.d.mts +1 -1
- package/dist/exports/invariants.mjs +2 -2
- package/dist/exports/io.d.mts +40 -5
- package/dist/exports/io.d.mts.map +1 -1
- package/dist/exports/io.mjs +4 -162
- package/dist/exports/metadata.d.mts +1 -1
- package/dist/exports/migration-graph.d.mts +3 -3
- package/dist/exports/migration-graph.d.mts.map +1 -1
- package/dist/exports/migration-graph.mjs +2 -2
- package/dist/exports/migration-graph.mjs.map +1 -1
- package/dist/exports/migration.d.mts +3 -3
- package/dist/exports/migration.d.mts.map +1 -1
- package/dist/exports/migration.mjs +4 -4
- package/dist/exports/package.d.mts +3 -2
- package/dist/exports/refs.mjs +1 -1
- package/dist/exports/spaces.d.mts +447 -0
- package/dist/exports/spaces.d.mts.map +1 -0
- package/dist/exports/spaces.mjs +433 -0
- package/dist/exports/spaces.mjs.map +1 -0
- package/dist/{graph-BHPv-9Gl.d.mts → graph-Czaj8O2q.d.mts} +1 -1
- package/dist/{graph-BHPv-9Gl.d.mts.map → graph-Czaj8O2q.d.mts.map} +1 -1
- package/dist/{hash-BARZdVgW.mjs → hash-G0bAfIGh.mjs} +2 -2
- package/dist/hash-G0bAfIGh.mjs.map +1 -0
- package/dist/{invariants-30VA65sB.mjs → invariants-4Avb_Yhy.mjs} +2 -2
- package/dist/{invariants-30VA65sB.mjs.map → invariants-4Avb_Yhy.mjs.map} +1 -1
- package/dist/io-CDJaWGbt.mjs +207 -0
- package/dist/io-CDJaWGbt.mjs.map +1 -0
- package/dist/metadata-CSjwljJx.d.mts +2 -0
- package/dist/{op-schema-DZKFua46.mjs → op-schema-BiF1ZYqH.mjs} +1 -1
- package/dist/{op-schema-DZKFua46.mjs.map → op-schema-BiF1ZYqH.mjs.map} +1 -1
- package/dist/package-B3Yl6DTr.d.mts +21 -0
- package/dist/package-B3Yl6DTr.d.mts.map +1 -0
- package/package.json +8 -4
- package/src/concatenate-space-apply-inputs.ts +90 -0
- package/src/detect-space-contract-drift.ts +95 -0
- package/src/emit-pinned-space-artefacts.ts +89 -0
- package/src/errors.ts +35 -0
- package/src/exports/io.ts +1 -0
- package/src/exports/package.ts +2 -1
- package/src/exports/spaces.ts +36 -0
- package/src/hash.ts +2 -2
- package/src/io.ts +71 -16
- package/src/metadata.ts +1 -41
- package/src/migration-graph.ts +2 -2
- package/src/package.ts +14 -11
- package/src/plan-all-spaces.ts +80 -0
- package/src/read-pinned-contract-hash.ts +77 -0
- package/src/space-layout.ts +55 -0
- package/src/verify-contract-spaces.ts +276 -0
- package/dist/errors-CfmjBeK0.mjs.map +0 -1
- package/dist/exports/io.mjs.map +0 -1
- package/dist/hash-BARZdVgW.mjs.map +0 -1
- package/dist/metadata-BP1cmU7Z.d.mts +0 -50
- package/dist/metadata-BP1cmU7Z.d.mts.map +0 -1
- package/dist/package-5HCCg0z-.d.mts +0 -21
- package/dist/package-5HCCg0z-.d.mts.map +0 -1
|
@@ -0,0 +1,207 @@
|
|
|
1
|
+
import { C as errorProvidedInvariantsMismatch, _ as errorMigrationHashMismatch, h as errorInvalidSlug, l as errorInvalidJson, r as errorDirectoryExists, s as errorInvalidDestName, u as errorInvalidManifest, v as errorMissingFile } from "./errors-DQsXvidG.mjs";
|
|
2
|
+
import { n as verifyMigrationHash, r as canonicalizeJson } from "./hash-G0bAfIGh.mjs";
|
|
3
|
+
import { t as deriveProvidedInvariants } from "./invariants-4Avb_Yhy.mjs";
|
|
4
|
+
import { n as MigrationOpsSchema } from "./op-schema-BiF1ZYqH.mjs";
|
|
5
|
+
import { basename, dirname, join, resolve } from "pathe";
|
|
6
|
+
import { copyFile, mkdir, readFile, readdir, rm, stat, writeFile } from "node:fs/promises";
|
|
7
|
+
import { type } from "arktype";
|
|
8
|
+
|
|
9
|
+
//#region src/io.ts
|
|
10
|
+
const MANIFEST_FILE = "migration.json";
|
|
11
|
+
const OPS_FILE = "ops.json";
|
|
12
|
+
const MAX_SLUG_LENGTH = 64;
|
|
13
|
+
function hasErrnoCode(error, code) {
|
|
14
|
+
return error instanceof Error && error.code === code;
|
|
15
|
+
}
|
|
16
|
+
const MigrationHintsSchema = type({
|
|
17
|
+
used: "string[]",
|
|
18
|
+
applied: "string[]",
|
|
19
|
+
plannerVersion: "string"
|
|
20
|
+
});
|
|
21
|
+
const MigrationMetadataSchema = type({
|
|
22
|
+
"+": "reject",
|
|
23
|
+
from: "string > 0 | null",
|
|
24
|
+
to: "string",
|
|
25
|
+
migrationHash: "string",
|
|
26
|
+
fromContract: "object | null",
|
|
27
|
+
toContract: "object",
|
|
28
|
+
hints: MigrationHintsSchema,
|
|
29
|
+
labels: "string[]",
|
|
30
|
+
providedInvariants: "string[]",
|
|
31
|
+
"authorship?": type({
|
|
32
|
+
"author?": "string",
|
|
33
|
+
"email?": "string"
|
|
34
|
+
}),
|
|
35
|
+
"signature?": type({
|
|
36
|
+
keyId: "string",
|
|
37
|
+
value: "string"
|
|
38
|
+
}).or("null"),
|
|
39
|
+
createdAt: "string"
|
|
40
|
+
});
|
|
41
|
+
async function writeMigrationPackage(dir, metadata, ops) {
|
|
42
|
+
await mkdir(dirname(dir), { recursive: true });
|
|
43
|
+
try {
|
|
44
|
+
await mkdir(dir);
|
|
45
|
+
} catch (error) {
|
|
46
|
+
if (hasErrnoCode(error, "EEXIST")) throw errorDirectoryExists(dir);
|
|
47
|
+
throw error;
|
|
48
|
+
}
|
|
49
|
+
await writeFile(join(dir, MANIFEST_FILE), JSON.stringify(metadata, null, 2), { flag: "wx" });
|
|
50
|
+
await writeFile(join(dir, OPS_FILE), JSON.stringify(ops, null, 2), { flag: "wx" });
|
|
51
|
+
}
|
|
52
|
+
/**
|
|
53
|
+
* Materialise an in-memory {@link MigrationPackage} to a per-space
|
|
54
|
+
* directory on disk.
|
|
55
|
+
*
|
|
56
|
+
* Writes three files under `<targetDir>/<pkg.dirName>/`:
|
|
57
|
+
*
|
|
58
|
+
* - `migration.json` — the manifest (pretty-printed, matches
|
|
59
|
+
* {@link writeMigrationPackage}'s output for byte-for-byte parity with
|
|
60
|
+
* app-space migrations).
|
|
61
|
+
* - `ops.json` — the operation list (pretty-printed).
|
|
62
|
+
* - `contract.json` — the canonical-JSON serialisation of
|
|
63
|
+
* `metadata.toContract`. This is the per-package post-state contract
|
|
64
|
+
* snapshot; the canonicalisation pass guarantees byte-determinism so
|
|
65
|
+
* re-emitting the same package across machines / runs produces an
|
|
66
|
+
* identical file.
|
|
67
|
+
*
|
|
68
|
+
* Distinct verb from the lower-level {@link writeMigrationPackage}
|
|
69
|
+
* (which takes constituent `(metadata, ops)`): callers reading
|
|
70
|
+
* `materialise…` know they are persisting a struct-typed package
|
|
71
|
+
* including its contract-snapshot side car.
|
|
72
|
+
*
|
|
73
|
+
* Overwrite-idempotent: the per-package directory is cleared before
|
|
74
|
+
* each emit, so re-running against the same `targetDir` produces
|
|
75
|
+
* byte-identical contents and never leaves stale files behind. The
|
|
76
|
+
* spec's "re-emitting the same package across runs / machines produces
|
|
77
|
+
* byte-identical files" guarantee (§ 3) covers both same-dir and
|
|
78
|
+
* fresh-dir re-emits. The lower-level {@link writeMigrationPackage}
|
|
79
|
+
* stays strict because the CLI authoring path (`migration plan` /
|
|
80
|
+
* `migration new`) deliberately refuses to clobber an existing
|
|
81
|
+
* authored migration; this helper is the re-emit path that is
|
|
82
|
+
* supposed to converge on a single canonical on-disk shape.
|
|
83
|
+
*
|
|
84
|
+
* @see specs/framework-mechanism.spec.md § 3 — Emission helper (T1.7).
|
|
85
|
+
*/
|
|
86
|
+
async function materialiseMigrationPackage(targetDir, pkg) {
|
|
87
|
+
const dir = join(targetDir, pkg.dirName);
|
|
88
|
+
await rm(dir, {
|
|
89
|
+
recursive: true,
|
|
90
|
+
force: true
|
|
91
|
+
});
|
|
92
|
+
await writeMigrationPackage(dir, pkg.metadata, pkg.ops);
|
|
93
|
+
await writeFile(join(dir, "contract.json"), `${canonicalizeJson(pkg.metadata.toContract)}\n`, { flag: "wx" });
|
|
94
|
+
}
|
|
95
|
+
/**
|
|
96
|
+
* Copy a list of files into `destDir`, optionally renaming each one.
|
|
97
|
+
*
|
|
98
|
+
* The destination directory is created (with `recursive: true`) if it
|
|
99
|
+
* does not already exist. Each source path is copied byte-for-byte into
|
|
100
|
+
* `destDir/<destName>`; missing sources throw `ENOENT`. The helper is
|
|
101
|
+
* intentionally generic: callers own the list of files (e.g. a contract
|
|
102
|
+
* emitter's emitted output) and the naming convention (e.g. renaming
|
|
103
|
+
* the destination contract to `end-contract.*` and the source contract
|
|
104
|
+
* to `start-contract.*`).
|
|
105
|
+
*/
|
|
106
|
+
async function copyFilesWithRename(destDir, files) {
|
|
107
|
+
await mkdir(destDir, { recursive: true });
|
|
108
|
+
for (const file of files) {
|
|
109
|
+
if (basename(file.destName) !== file.destName) throw errorInvalidDestName(file.destName);
|
|
110
|
+
await copyFile(file.sourcePath, join(destDir, file.destName));
|
|
111
|
+
}
|
|
112
|
+
}
|
|
113
|
+
async function writeMigrationMetadata(dir, metadata) {
|
|
114
|
+
await writeFile(join(dir, MANIFEST_FILE), `${JSON.stringify(metadata, null, 2)}\n`);
|
|
115
|
+
}
|
|
116
|
+
async function writeMigrationOps(dir, ops) {
|
|
117
|
+
await writeFile(join(dir, OPS_FILE), `${JSON.stringify(ops, null, 2)}\n`);
|
|
118
|
+
}
|
|
119
|
+
async function readMigrationPackage(dir) {
|
|
120
|
+
const absoluteDir = resolve(dir);
|
|
121
|
+
const manifestPath = join(absoluteDir, MANIFEST_FILE);
|
|
122
|
+
const opsPath = join(absoluteDir, OPS_FILE);
|
|
123
|
+
let manifestRaw;
|
|
124
|
+
try {
|
|
125
|
+
manifestRaw = await readFile(manifestPath, "utf-8");
|
|
126
|
+
} catch (error) {
|
|
127
|
+
if (hasErrnoCode(error, "ENOENT")) throw errorMissingFile(MANIFEST_FILE, absoluteDir);
|
|
128
|
+
throw error;
|
|
129
|
+
}
|
|
130
|
+
let opsRaw;
|
|
131
|
+
try {
|
|
132
|
+
opsRaw = await readFile(opsPath, "utf-8");
|
|
133
|
+
} catch (error) {
|
|
134
|
+
if (hasErrnoCode(error, "ENOENT")) throw errorMissingFile(OPS_FILE, absoluteDir);
|
|
135
|
+
throw error;
|
|
136
|
+
}
|
|
137
|
+
let metadata;
|
|
138
|
+
try {
|
|
139
|
+
metadata = JSON.parse(manifestRaw);
|
|
140
|
+
} catch (e) {
|
|
141
|
+
throw errorInvalidJson(manifestPath, e instanceof Error ? e.message : String(e));
|
|
142
|
+
}
|
|
143
|
+
let ops;
|
|
144
|
+
try {
|
|
145
|
+
ops = JSON.parse(opsRaw);
|
|
146
|
+
} catch (e) {
|
|
147
|
+
throw errorInvalidJson(opsPath, e instanceof Error ? e.message : String(e));
|
|
148
|
+
}
|
|
149
|
+
validateMetadata(metadata, manifestPath);
|
|
150
|
+
validateOps(ops, opsPath);
|
|
151
|
+
const derivedInvariants = deriveProvidedInvariants(ops);
|
|
152
|
+
if (!arraysEqual(metadata.providedInvariants, derivedInvariants)) throw errorProvidedInvariantsMismatch(manifestPath, metadata.providedInvariants, derivedInvariants);
|
|
153
|
+
const pkg = {
|
|
154
|
+
dirName: basename(absoluteDir),
|
|
155
|
+
dirPath: absoluteDir,
|
|
156
|
+
metadata,
|
|
157
|
+
ops
|
|
158
|
+
};
|
|
159
|
+
const verification = verifyMigrationHash(pkg);
|
|
160
|
+
if (!verification.ok) throw errorMigrationHashMismatch(absoluteDir, verification.storedHash, verification.computedHash);
|
|
161
|
+
return pkg;
|
|
162
|
+
}
|
|
163
|
+
function arraysEqual(a, b) {
|
|
164
|
+
if (a.length !== b.length) return false;
|
|
165
|
+
for (let i = 0; i < a.length; i++) if (a[i] !== b[i]) return false;
|
|
166
|
+
return true;
|
|
167
|
+
}
|
|
168
|
+
function validateMetadata(metadata, filePath) {
|
|
169
|
+
const result = MigrationMetadataSchema(metadata);
|
|
170
|
+
if (result instanceof type.errors) throw errorInvalidManifest(filePath, result.summary);
|
|
171
|
+
}
|
|
172
|
+
function validateOps(ops, filePath) {
|
|
173
|
+
const result = MigrationOpsSchema(ops);
|
|
174
|
+
if (result instanceof type.errors) throw errorInvalidManifest(filePath, result.summary);
|
|
175
|
+
}
|
|
176
|
+
async function readMigrationsDir(migrationsRoot) {
|
|
177
|
+
let entries;
|
|
178
|
+
try {
|
|
179
|
+
entries = await readdir(migrationsRoot);
|
|
180
|
+
} catch (error) {
|
|
181
|
+
if (hasErrnoCode(error, "ENOENT")) return [];
|
|
182
|
+
throw error;
|
|
183
|
+
}
|
|
184
|
+
const packages = [];
|
|
185
|
+
for (const entry of entries.sort()) {
|
|
186
|
+
const entryPath = join(migrationsRoot, entry);
|
|
187
|
+
if (!(await stat(entryPath)).isDirectory()) continue;
|
|
188
|
+
const manifestPath = join(entryPath, MANIFEST_FILE);
|
|
189
|
+
try {
|
|
190
|
+
await stat(manifestPath);
|
|
191
|
+
} catch {
|
|
192
|
+
continue;
|
|
193
|
+
}
|
|
194
|
+
packages.push(await readMigrationPackage(entryPath));
|
|
195
|
+
}
|
|
196
|
+
return packages;
|
|
197
|
+
}
|
|
198
|
+
function formatMigrationDirName(timestamp, slug) {
|
|
199
|
+
const sanitized = slug.toLowerCase().replace(/[^a-z0-9]/g, "_").replace(/_+/g, "_").replace(/^_|_$/g, "");
|
|
200
|
+
if (sanitized.length === 0) throw errorInvalidSlug(slug);
|
|
201
|
+
const truncated = sanitized.slice(0, MAX_SLUG_LENGTH);
|
|
202
|
+
return `${timestamp.getUTCFullYear()}${String(timestamp.getUTCMonth() + 1).padStart(2, "0")}${String(timestamp.getUTCDate()).padStart(2, "0")}T${String(timestamp.getUTCHours()).padStart(2, "0")}${String(timestamp.getUTCMinutes()).padStart(2, "0")}_${truncated}`;
|
|
203
|
+
}
|
|
204
|
+
|
|
205
|
+
//#endregion
|
|
206
|
+
export { readMigrationPackage as a, writeMigrationOps as c, materialiseMigrationPackage as i, writeMigrationPackage as l, copyFilesWithRename as n, readMigrationsDir as o, formatMigrationDirName as r, writeMigrationMetadata as s, MANIFEST_FILE as t };
|
|
207
|
+
//# sourceMappingURL=io-CDJaWGbt.mjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"io-CDJaWGbt.mjs","names":["manifestRaw: string","opsRaw: string","metadata: MigrationMetadata","ops: MigrationOps","pkg: OnDiskMigrationPackage","entries: string[]","packages: OnDiskMigrationPackage[]"],"sources":["../src/io.ts"],"sourcesContent":["import { copyFile, mkdir, readdir, readFile, rm, stat, writeFile } from 'node:fs/promises';\nimport type {\n MigrationMetadata,\n MigrationPackage,\n} from '@prisma-next/framework-components/control';\nimport { type } from 'arktype';\nimport { basename, dirname, join, resolve } from 'pathe';\nimport { canonicalizeJson } from './canonicalize-json';\nimport {\n errorDirectoryExists,\n errorInvalidDestName,\n errorInvalidJson,\n errorInvalidManifest,\n errorInvalidSlug,\n errorMigrationHashMismatch,\n errorMissingFile,\n errorProvidedInvariantsMismatch,\n} from './errors';\nimport { verifyMigrationHash } from './hash';\nimport { deriveProvidedInvariants } from './invariants';\nimport { MigrationOpsSchema } from './op-schema';\nimport type { MigrationOps, OnDiskMigrationPackage } from './package';\n\nexport const MANIFEST_FILE = 'migration.json';\nconst OPS_FILE = 'ops.json';\nconst MAX_SLUG_LENGTH = 64;\n\nfunction hasErrnoCode(error: unknown, code: string): boolean {\n return error instanceof Error && (error as { code?: string }).code === code;\n}\n\nconst MigrationHintsSchema = type({\n used: 'string[]',\n applied: 'string[]',\n plannerVersion: 'string',\n});\n\nconst MigrationMetadataSchema = type({\n '+': 'reject',\n from: 'string > 0 | null',\n to: 'string',\n migrationHash: 'string',\n fromContract: 'object | null',\n toContract: 'object',\n hints: MigrationHintsSchema,\n labels: 'string[]',\n providedInvariants: 'string[]',\n 'authorship?': type({\n 'author?': 'string',\n 'email?': 'string',\n }),\n 'signature?': type({\n keyId: 'string',\n value: 'string',\n }).or('null'),\n createdAt: 'string',\n});\n\nexport async function writeMigrationPackage(\n dir: string,\n metadata: MigrationMetadata,\n 
ops: MigrationOps,\n): Promise<void> {\n await mkdir(dirname(dir), { recursive: true });\n\n try {\n await mkdir(dir);\n } catch (error) {\n if (hasErrnoCode(error, 'EEXIST')) {\n throw errorDirectoryExists(dir);\n }\n throw error;\n }\n\n await writeFile(join(dir, MANIFEST_FILE), JSON.stringify(metadata, null, 2), {\n flag: 'wx',\n });\n await writeFile(join(dir, OPS_FILE), JSON.stringify(ops, null, 2), { flag: 'wx' });\n}\n\n/**\n * Materialise an in-memory {@link MigrationPackage} to a per-space\n * directory on disk.\n *\n * Writes three files under `<targetDir>/<pkg.dirName>/`:\n *\n * - `migration.json` — the manifest (pretty-printed, matches\n * {@link writeMigrationPackage}'s output for byte-for-byte parity with\n * app-space migrations).\n * - `ops.json` — the operation list (pretty-printed).\n * - `contract.json` — the canonical-JSON serialisation of\n * `metadata.toContract`. This is the per-package post-state contract\n * snapshot; the canonicalisation pass guarantees byte-determinism so\n * re-emitting the same package across machines / runs produces an\n * identical file.\n *\n * Distinct verb from the lower-level {@link writeMigrationPackage}\n * (which takes constituent `(metadata, ops)`): callers reading\n * `materialise…` know they are persisting a struct-typed package\n * including its contract-snapshot side car.\n *\n * Overwrite-idempotent: the per-package directory is cleared before\n * each emit, so re-running against the same `targetDir` produces\n * byte-identical contents and never leaves stale files behind. The\n * spec's \"re-emitting the same package across runs / machines produces\n * byte-identical files\" guarantee (§ 3) covers both same-dir and\n * fresh-dir re-emits. 
The lower-level {@link writeMigrationPackage}\n * stays strict because the CLI authoring path (`migration plan` /\n * `migration new`) deliberately refuses to clobber an existing\n * authored migration; this helper is the re-emit path that is\n * supposed to converge on a single canonical on-disk shape.\n *\n * @see specs/framework-mechanism.spec.md § 3 — Emission helper (T1.7).\n */\nexport async function materialiseMigrationPackage(\n targetDir: string,\n pkg: MigrationPackage,\n): Promise<void> {\n const dir = join(targetDir, pkg.dirName);\n await rm(dir, { recursive: true, force: true });\n await writeMigrationPackage(dir, pkg.metadata, pkg.ops);\n await writeFile(join(dir, 'contract.json'), `${canonicalizeJson(pkg.metadata.toContract)}\\n`, {\n flag: 'wx',\n });\n}\n\n/**\n * Copy a list of files into `destDir`, optionally renaming each one.\n *\n * The destination directory is created (with `recursive: true`) if it\n * does not already exist. Each source path is copied byte-for-byte into\n * `destDir/<destName>`; missing sources throw `ENOENT`. The helper is\n * intentionally generic: callers own the list of files (e.g. a contract\n * emitter's emitted output) and the naming convention (e.g. 
renaming\n * the destination contract to `end-contract.*` and the source contract\n * to `start-contract.*`).\n */\nexport async function copyFilesWithRename(\n destDir: string,\n files: readonly { readonly sourcePath: string; readonly destName: string }[],\n): Promise<void> {\n await mkdir(destDir, { recursive: true });\n for (const file of files) {\n if (basename(file.destName) !== file.destName) {\n throw errorInvalidDestName(file.destName);\n }\n await copyFile(file.sourcePath, join(destDir, file.destName));\n }\n}\n\nexport async function writeMigrationMetadata(\n dir: string,\n metadata: MigrationMetadata,\n): Promise<void> {\n await writeFile(join(dir, MANIFEST_FILE), `${JSON.stringify(metadata, null, 2)}\\n`);\n}\n\nexport async function writeMigrationOps(dir: string, ops: MigrationOps): Promise<void> {\n await writeFile(join(dir, OPS_FILE), `${JSON.stringify(ops, null, 2)}\\n`);\n}\n\nexport async function readMigrationPackage(dir: string): Promise<OnDiskMigrationPackage> {\n const absoluteDir = resolve(dir);\n const manifestPath = join(absoluteDir, MANIFEST_FILE);\n const opsPath = join(absoluteDir, OPS_FILE);\n\n let manifestRaw: string;\n try {\n manifestRaw = await readFile(manifestPath, 'utf-8');\n } catch (error) {\n if (hasErrnoCode(error, 'ENOENT')) {\n throw errorMissingFile(MANIFEST_FILE, absoluteDir);\n }\n throw error;\n }\n\n let opsRaw: string;\n try {\n opsRaw = await readFile(opsPath, 'utf-8');\n } catch (error) {\n if (hasErrnoCode(error, 'ENOENT')) {\n throw errorMissingFile(OPS_FILE, absoluteDir);\n }\n throw error;\n }\n\n let metadata: MigrationMetadata;\n try {\n metadata = JSON.parse(manifestRaw);\n } catch (e) {\n throw errorInvalidJson(manifestPath, e instanceof Error ? e.message : String(e));\n }\n\n let ops: MigrationOps;\n try {\n ops = JSON.parse(opsRaw);\n } catch (e) {\n throw errorInvalidJson(opsPath, e instanceof Error ? 
e.message : String(e));\n }\n\n validateMetadata(metadata, manifestPath);\n validateOps(ops, opsPath);\n\n // Re-derive before the hash check so format/duplicate diagnostics\n // fire with their dedicated codes rather than as a generic hash mismatch.\n const derivedInvariants = deriveProvidedInvariants(ops);\n if (!arraysEqual(metadata.providedInvariants, derivedInvariants)) {\n throw errorProvidedInvariantsMismatch(\n manifestPath,\n metadata.providedInvariants,\n derivedInvariants,\n );\n }\n\n const pkg: OnDiskMigrationPackage = {\n dirName: basename(absoluteDir),\n dirPath: absoluteDir,\n metadata,\n ops,\n };\n\n const verification = verifyMigrationHash(pkg);\n if (!verification.ok) {\n throw errorMigrationHashMismatch(\n absoluteDir,\n verification.storedHash,\n verification.computedHash,\n );\n }\n\n return pkg;\n}\n\nfunction arraysEqual(a: readonly string[], b: readonly string[]): boolean {\n if (a.length !== b.length) return false;\n for (let i = 0; i < a.length; i++) {\n if (a[i] !== b[i]) return false;\n }\n return true;\n}\n\nfunction validateMetadata(\n metadata: unknown,\n filePath: string,\n): asserts metadata is MigrationMetadata {\n const result = MigrationMetadataSchema(metadata);\n if (result instanceof type.errors) {\n throw errorInvalidManifest(filePath, result.summary);\n }\n}\n\nfunction validateOps(ops: unknown, filePath: string): asserts ops is MigrationOps {\n const result = MigrationOpsSchema(ops);\n if (result instanceof type.errors) {\n throw errorInvalidManifest(filePath, result.summary);\n }\n}\n\nexport async function readMigrationsDir(\n migrationsRoot: string,\n): Promise<readonly OnDiskMigrationPackage[]> {\n let entries: string[];\n try {\n entries = await readdir(migrationsRoot);\n } catch (error) {\n if (hasErrnoCode(error, 'ENOENT')) {\n return [];\n }\n throw error;\n }\n\n const packages: OnDiskMigrationPackage[] = [];\n\n for (const entry of entries.sort()) {\n const entryPath = join(migrationsRoot, entry);\n const 
entryStat = await stat(entryPath);\n if (!entryStat.isDirectory()) continue;\n\n const manifestPath = join(entryPath, MANIFEST_FILE);\n try {\n await stat(manifestPath);\n } catch {\n continue; // skip non-migration directories\n }\n\n packages.push(await readMigrationPackage(entryPath));\n }\n\n return packages;\n}\n\nexport function formatMigrationDirName(timestamp: Date, slug: string): string {\n const sanitized = slug\n .toLowerCase()\n .replace(/[^a-z0-9]/g, '_')\n .replace(/_+/g, '_')\n .replace(/^_|_$/g, '');\n\n if (sanitized.length === 0) {\n throw errorInvalidSlug(slug);\n }\n\n const truncated = sanitized.slice(0, MAX_SLUG_LENGTH);\n\n const y = timestamp.getUTCFullYear();\n const mo = String(timestamp.getUTCMonth() + 1).padStart(2, '0');\n const d = String(timestamp.getUTCDate()).padStart(2, '0');\n const h = String(timestamp.getUTCHours()).padStart(2, '0');\n const mi = String(timestamp.getUTCMinutes()).padStart(2, '0');\n\n return `${y}${mo}${d}T${h}${mi}_${truncated}`;\n}\n"],"mappings":";;;;;;;;;AAuBA,MAAa,gBAAgB;AAC7B,MAAM,WAAW;AACjB,MAAM,kBAAkB;AAExB,SAAS,aAAa,OAAgB,MAAuB;AAC3D,QAAO,iBAAiB,SAAU,MAA4B,SAAS;;AAGzE,MAAM,uBAAuB,KAAK;CAChC,MAAM;CACN,SAAS;CACT,gBAAgB;CACjB,CAAC;AAEF,MAAM,0BAA0B,KAAK;CACnC,KAAK;CACL,MAAM;CACN,IAAI;CACJ,eAAe;CACf,cAAc;CACd,YAAY;CACZ,OAAO;CACP,QAAQ;CACR,oBAAoB;CACpB,eAAe,KAAK;EAClB,WAAW;EACX,UAAU;EACX,CAAC;CACF,cAAc,KAAK;EACjB,OAAO;EACP,OAAO;EACR,CAAC,CAAC,GAAG,OAAO;CACb,WAAW;CACZ,CAAC;AAEF,eAAsB,sBACpB,KACA,UACA,KACe;AACf,OAAM,MAAM,QAAQ,IAAI,EAAE,EAAE,WAAW,MAAM,CAAC;AAE9C,KAAI;AACF,QAAM,MAAM,IAAI;UACT,OAAO;AACd,MAAI,aAAa,OAAO,SAAS,CAC/B,OAAM,qBAAqB,IAAI;AAEjC,QAAM;;AAGR,OAAM,UAAU,KAAK,KAAK,cAAc,EAAE,KAAK,UAAU,UAAU,MAAM,EAAE,EAAE,EAC3E,MAAM,MACP,CAAC;AACF,OAAM,UAAU,KAAK,KAAK,SAAS,EAAE,KAAK,UAAU,KAAK,MAAM,EAAE,EAAE,EAAE,MAAM,MAAM,CAAC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAqCpF,eAAsB,4BACpB,WACA,KACe;CACf,MAAM,MAAM,KAAK,WAAW,IAAI,QAAQ;AACxC,OAAM,GAAG,KAAK;EAAE,WAAW;EAAM,OAAO;EAAM,CAAC;AAC/C,OAAM,sBAAsB,KAAK,IAAI,UAAU,IAAI,IAAI
;AACvD,OAAM,UAAU,KAAK,KAAK,gBAAgB,EAAE,GAAG,iBAAiB,IAAI,SAAS,WAAW,CAAC,KAAK,EAC5F,MAAM,MACP,CAAC;;;;;;;;;;;;;AAcJ,eAAsB,oBACpB,SACA,OACe;AACf,OAAM,MAAM,SAAS,EAAE,WAAW,MAAM,CAAC;AACzC,MAAK,MAAM,QAAQ,OAAO;AACxB,MAAI,SAAS,KAAK,SAAS,KAAK,KAAK,SACnC,OAAM,qBAAqB,KAAK,SAAS;AAE3C,QAAM,SAAS,KAAK,YAAY,KAAK,SAAS,KAAK,SAAS,CAAC;;;AAIjE,eAAsB,uBACpB,KACA,UACe;AACf,OAAM,UAAU,KAAK,KAAK,cAAc,EAAE,GAAG,KAAK,UAAU,UAAU,MAAM,EAAE,CAAC,IAAI;;AAGrF,eAAsB,kBAAkB,KAAa,KAAkC;AACrF,OAAM,UAAU,KAAK,KAAK,SAAS,EAAE,GAAG,KAAK,UAAU,KAAK,MAAM,EAAE,CAAC,IAAI;;AAG3E,eAAsB,qBAAqB,KAA8C;CACvF,MAAM,cAAc,QAAQ,IAAI;CAChC,MAAM,eAAe,KAAK,aAAa,cAAc;CACrD,MAAM,UAAU,KAAK,aAAa,SAAS;CAE3C,IAAIA;AACJ,KAAI;AACF,gBAAc,MAAM,SAAS,cAAc,QAAQ;UAC5C,OAAO;AACd,MAAI,aAAa,OAAO,SAAS,CAC/B,OAAM,iBAAiB,eAAe,YAAY;AAEpD,QAAM;;CAGR,IAAIC;AACJ,KAAI;AACF,WAAS,MAAM,SAAS,SAAS,QAAQ;UAClC,OAAO;AACd,MAAI,aAAa,OAAO,SAAS,CAC/B,OAAM,iBAAiB,UAAU,YAAY;AAE/C,QAAM;;CAGR,IAAIC;AACJ,KAAI;AACF,aAAW,KAAK,MAAM,YAAY;UAC3B,GAAG;AACV,QAAM,iBAAiB,cAAc,aAAa,QAAQ,EAAE,UAAU,OAAO,EAAE,CAAC;;CAGlF,IAAIC;AACJ,KAAI;AACF,QAAM,KAAK,MAAM,OAAO;UACjB,GAAG;AACV,QAAM,iBAAiB,SAAS,aAAa,QAAQ,EAAE,UAAU,OAAO,EAAE,CAAC;;AAG7E,kBAAiB,UAAU,aAAa;AACxC,aAAY,KAAK,QAAQ;CAIzB,MAAM,oBAAoB,yBAAyB,IAAI;AACvD,KAAI,CAAC,YAAY,SAAS,oBAAoB,kBAAkB,CAC9D,OAAM,gCACJ,cACA,SAAS,oBACT,kBACD;CAGH,MAAMC,MAA8B;EAClC,SAAS,SAAS,YAAY;EAC9B,SAAS;EACT;EACA;EACD;CAED,MAAM,eAAe,oBAAoB,IAAI;AAC7C,KAAI,CAAC,aAAa,GAChB,OAAM,2BACJ,aACA,aAAa,YACb,aAAa,aACd;AAGH,QAAO;;AAGT,SAAS,YAAY,GAAsB,GAA+B;AACxE,KAAI,EAAE,WAAW,EAAE,OAAQ,QAAO;AAClC,MAAK,IAAI,IAAI,GAAG,IAAI,EAAE,QAAQ,IAC5B,KAAI,EAAE,OAAO,EAAE,GAAI,QAAO;AAE5B,QAAO;;AAGT,SAAS,iBACP,UACA,UACuC;CACvC,MAAM,SAAS,wBAAwB,SAAS;AAChD,KAAI,kBAAkB,KAAK,OACzB,OAAM,qBAAqB,UAAU,OAAO,QAAQ;;AAIxD,SAAS,YAAY,KAAc,UAA+C;CAChF,MAAM,SAAS,mBAAmB,IAAI;AACtC,KAAI,kBAAkB,KAAK,OACzB,OAAM,qBAAqB,UAAU,OAAO,QAAQ;;AAIxD,eAAsB,kBACpB,gBAC4C;CAC5C,IAAIC;AACJ,KAAI;AACF,YAAU,MAAM,QAAQ,eAAe;UAChC,OAAO;AACd,MAAI,aAAa,OAAO,SAAS,CAC/B,QAAO,EAAE;AAEX,QAAM;;CAGR,MAAMC,WAAqC,EAAE;AAE7C,MAAK,MAAM,
SAAS,QAAQ,MAAM,EAAE;EAClC,MAAM,YAAY,KAAK,gBAAgB,MAAM;AAE7C,MAAI,EADc,MAAM,KAAK,UAAU,EACxB,aAAa,CAAE;EAE9B,MAAM,eAAe,KAAK,WAAW,cAAc;AACnD,MAAI;AACF,SAAM,KAAK,aAAa;UAClB;AACN;;AAGF,WAAS,KAAK,MAAM,qBAAqB,UAAU,CAAC;;AAGtD,QAAO;;AAGT,SAAgB,uBAAuB,WAAiB,MAAsB;CAC5E,MAAM,YAAY,KACf,aAAa,CACb,QAAQ,cAAc,IAAI,CAC1B,QAAQ,OAAO,IAAI,CACnB,QAAQ,UAAU,GAAG;AAExB,KAAI,UAAU,WAAW,EACvB,OAAM,iBAAiB,KAAK;CAG9B,MAAM,YAAY,UAAU,MAAM,GAAG,gBAAgB;AAQrD,QAAO,GANG,UAAU,gBAAgB,GACzB,OAAO,UAAU,aAAa,GAAG,EAAE,CAAC,SAAS,GAAG,IAAI,GACrD,OAAO,UAAU,YAAY,CAAC,CAAC,SAAS,GAAG,IAAI,CAIpC,GAHX,OAAO,UAAU,aAAa,CAAC,CAAC,SAAS,GAAG,IAAI,GAC/C,OAAO,UAAU,eAAe,CAAC,CAAC,SAAS,GAAG,IAAI,CAE9B,GAAG"}
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"op-schema-
|
|
1
|
+
{"version":3,"file":"op-schema-BiF1ZYqH.mjs","names":[],"sources":["../src/op-schema.ts"],"sourcesContent":["import { type } from 'arktype';\n\nexport const MigrationOpSchema = type({\n id: 'string',\n label: 'string',\n operationClass: \"'additive' | 'widening' | 'destructive' | 'data'\",\n 'invariantId?': 'string',\n});\n\n// Intentionally shallow: operation-specific payload validation is owned by planner/runner layers.\nexport const MigrationOpsSchema = MigrationOpSchema.array();\n"],"mappings":";;;AAEA,MAAa,oBAAoB,KAAK;CACpC,IAAI;CACJ,OAAO;CACP,gBAAgB;CAChB,gBAAgB;CACjB,CAAC;AAGF,MAAa,qBAAqB,kBAAkB,OAAO"}
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
import { MigrationPackage, MigrationPlanOperation } from "@prisma-next/framework-components/control";
|
|
2
|
+
|
|
3
|
+
//#region src/package.d.ts
|
|
4
|
+
type MigrationOps = readonly MigrationPlanOperation[];
|
|
5
|
+
/**
|
|
6
|
+
* Augmented form of the canonical {@link MigrationPackage} returned by
|
|
7
|
+
* the on-disk readers (`readMigrationPackage`, `readMigrationsDir`).
|
|
8
|
+
* Adds `dirPath` — the absolute path the package was loaded from — so
|
|
9
|
+
* downstream diagnostics can point operators at a concrete directory.
|
|
10
|
+
*
|
|
11
|
+
* Holding an `OnDiskMigrationPackage` value implies the loader verified
|
|
12
|
+
* the package's integrity (hash recomputation against the stored
|
|
13
|
+
* `migrationHash`); the canonical structural shape carries no such
|
|
14
|
+
* guarantee on its own.
|
|
15
|
+
*/
|
|
16
|
+
interface OnDiskMigrationPackage extends MigrationPackage {
|
|
17
|
+
readonly dirPath: string;
|
|
18
|
+
}
|
|
19
|
+
//#endregion
|
|
20
|
+
export { OnDiskMigrationPackage as n, MigrationOps as t };
|
|
21
|
+
//# sourceMappingURL=package-B3Yl6DTr.d.mts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"package-B3Yl6DTr.d.mts","names":[],"sources":["../src/package.ts"],"sourcesContent":[],"mappings":";;;KAKY,YAAA,YAAwB;;AAApC;AAaA;;;;;;;;;UAAiB,sBAAA,SAA+B"}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@prisma-next/migration-tools",
|
|
3
|
-
"version": "0.5.0-dev.
|
|
3
|
+
"version": "0.5.0-dev.63",
|
|
4
4
|
"license": "Apache-2.0",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"sideEffects": false,
|
|
@@ -9,9 +9,9 @@
|
|
|
9
9
|
"arktype": "^2.1.29",
|
|
10
10
|
"pathe": "^2.0.3",
|
|
11
11
|
"prettier": "^3.6.2",
|
|
12
|
-
"@prisma-next/contract": "0.5.0-dev.
|
|
13
|
-
"@prisma-next/framework-components": "0.5.0-dev.
|
|
14
|
-
"@prisma-next/utils": "0.5.0-dev.
|
|
12
|
+
"@prisma-next/contract": "0.5.0-dev.63",
|
|
13
|
+
"@prisma-next/framework-components": "0.5.0-dev.63",
|
|
14
|
+
"@prisma-next/utils": "0.5.0-dev.63"
|
|
15
15
|
},
|
|
16
16
|
"devDependencies": {
|
|
17
17
|
"tsdown": "0.18.4",
|
|
@@ -76,6 +76,10 @@
|
|
|
76
76
|
"types": "./dist/exports/migration.d.mts",
|
|
77
77
|
"import": "./dist/exports/migration.mjs"
|
|
78
78
|
},
|
|
79
|
+
"./spaces": {
|
|
80
|
+
"types": "./dist/exports/spaces.d.mts",
|
|
81
|
+
"import": "./dist/exports/spaces.mjs"
|
|
82
|
+
},
|
|
79
83
|
"./package.json": "./package.json"
|
|
80
84
|
},
|
|
81
85
|
"repository": {
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
import { errorDuplicateSpaceId } from './errors';
|
|
2
|
+
import { APP_SPACE_ID } from './space-layout';
|
|
3
|
+
|
|
4
|
+
/**
 * Per-space input the runner consumes when applying a migration.
 *
 * The shape is target-agnostic: callers (today the SQL family; later
 * any other family) bind `TOp` to their own per-target operation type
 * (e.g. `SqlMigrationPlanOperation<TTargetDetails>` for the SQL family)
 * and the helper preserves it through the concatenation.
 *
 * - `migrationDirectory` is the on-disk migration directory for the
 *   space — `<projectRoot>/migrations` for `'app'` and
 *   `<projectRoot>/migrations/<space-id>` for an extension space.
 * - `currentMarkerHash` and `currentMarkerInvariants` are the values
 *   read from the `prisma_contract.marker` row keyed by `space = <space-id>`
 *   (T1.1). `null` hash = no marker row yet.
 * - `path` is the per-space operation list resolved from
 *   `findPathWithDecision(currentMarker, ref.hash, effectiveRequired)`
 *   per ADR 208, materialised against the on-disk migration packages.
 *
 * @see specs/framework-mechanism.spec.md § 4 — Runner.
 */
export interface SpaceApplyInput<TOp> {
  /** Identifier of the contract space (`'app'` denotes the app space). */
  readonly spaceId: string;
  /** On-disk migration directory for the space (layout described above). */
  readonly migrationDirectory: string;
  /** Marker hash read from `prisma_contract.marker`; `null` = no marker row yet. */
  readonly currentMarkerHash: string | null;
  /** Invariants read from the same marker row. */
  readonly currentMarkerInvariants: readonly string[];
  /** Ordered per-space operation list the runner will apply. */
  readonly path: readonly TOp[];
}
|
|
31
|
+
|
|
32
|
+
/**
|
|
33
|
+
* Order a set of per-space apply inputs into the canonical cross-space
|
|
34
|
+
* sequence the runner applies under a single transaction.
|
|
35
|
+
*
|
|
36
|
+
* Cross-space ordering convention (sub-spec § 4):
|
|
37
|
+
*
|
|
38
|
+
* 1. **Extension spaces first**, alphabetically by `spaceId`.
|
|
39
|
+
* 2. **App space last** — only one `'app'` entry expected, at most.
|
|
40
|
+
*
|
|
41
|
+
* Rationale: extensions install their own structural objects (types,
|
|
42
|
+
* functions, helper tables) before the app's structural ops reference
|
|
43
|
+
* them. Putting app-space last lets app-space ops freely depend on any
|
|
44
|
+
* extension-space declaration in the same transaction.
|
|
45
|
+
*
|
|
46
|
+
* Determinism (NFR6): the output order is independent of the input
|
|
47
|
+
* order, so two callers with the same set of `extensionPacks` produce
|
|
48
|
+
* identical apply sequences.
|
|
49
|
+
*
|
|
50
|
+
* Atomicity: rejects duplicate `spaceId`s with
|
|
51
|
+
* `MIGRATION.DUPLICATE_SPACE_ID` before producing any output. This
|
|
52
|
+
* mirrors {@link import('./plan-all-spaces').planAllSpaces} so the
|
|
53
|
+
* planner-side and runner-side helpers reject malformed inputs the same
|
|
54
|
+
* way (callers don't need a separate dedup pass).
|
|
55
|
+
*
|
|
56
|
+
* Synchronous, pure, no I/O: callers resolve marker rows and `path`
|
|
57
|
+
* before invoking this helper. The actual DB application — driving the
|
|
58
|
+
* transaction, committing marker writes, recording the per-space marker
|
|
59
|
+
* rows — happens at the SQL-family consumption site (per the
|
|
60
|
+
* helper-location convention from R3).
|
|
61
|
+
*/
|
|
62
|
+
export function concatenateSpaceApplyInputs<TOp>(
|
|
63
|
+
inputs: readonly SpaceApplyInput<TOp>[],
|
|
64
|
+
): readonly SpaceApplyInput<TOp>[] {
|
|
65
|
+
const seen = new Set<string>();
|
|
66
|
+
for (const input of inputs) {
|
|
67
|
+
if (seen.has(input.spaceId)) {
|
|
68
|
+
throw errorDuplicateSpaceId(input.spaceId);
|
|
69
|
+
}
|
|
70
|
+
seen.add(input.spaceId);
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
const extensions: SpaceApplyInput<TOp>[] = [];
|
|
74
|
+
let appSpace: SpaceApplyInput<TOp> | undefined;
|
|
75
|
+
for (const input of inputs) {
|
|
76
|
+
if (input.spaceId === APP_SPACE_ID) {
|
|
77
|
+
appSpace = input;
|
|
78
|
+
} else {
|
|
79
|
+
extensions.push(input);
|
|
80
|
+
}
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
extensions.sort((a, b) => {
|
|
84
|
+
if (a.spaceId < b.spaceId) return -1;
|
|
85
|
+
if (a.spaceId > b.spaceId) return 1;
|
|
86
|
+
return 0;
|
|
87
|
+
});
|
|
88
|
+
|
|
89
|
+
return appSpace ? [...extensions, appSpace] : extensions;
|
|
90
|
+
}
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
/**
 * Inputs for {@link detectSpaceContractDrift}.
 *
 * Both hashes are produced by the caller (the SQL-family wiring at the
 * consumption site) using the canonical contract hashing pipeline.
 * Keeping the helper pure lets `migration-tools` stay framework-neutral
 * — the SQL family already speaks `Contract<SqlStorage>`, the Mongo
 * family speaks its own contract type, and both reduce to a hash string
 * before drift detection runs.
 *
 * `pinnedHash` is `null` when no pinned `contract.json` exists yet for
 * the space (the descriptor declares an extension that has never been
 * emitted into the user's repo). That's the "first emit" case — no
 * drift to surface; the migrate emit will create the pinned files.
 *
 * @see specs/framework-mechanism.spec.md § 3 — Drift detection (T1.9).
 */
export interface DetectSpaceContractDriftInputs {
  /** Contract hash declared by the extension descriptor. */
  readonly descriptorHash: string;
  /** Hash of the pinned `contract.json`, or `null` when none exists yet. */
  readonly pinnedHash: string | null;
}
|
|
22
|
+
|
|
23
|
+
/**
 * Result discriminant for {@link detectSpaceContractDrift}.
 *
 * - `noDrift`: descriptor hash and pinned hash agree byte-for-byte.
 *   The migrate emit can proceed with no warning.
 * - `firstEmit`: no pinned `contract.json` on disk yet. The extension
 *   was just added to `extensionPacks`; this run will create the
 *   pinned files. No warning either — the user's intent is to install
 *   the extension, not to "drift" from a state they haven't pinned.
 * - `drift`: descriptor hash differs from pinned hash. The caller
 *   surfaces a non-fatal warning naming the extension and the
 *   diff direction (descriptor → pinned). The migrate emit proceeds
 *   normally so the bump is materialised this run; the warning just
 *   confirms the bump is being captured.
 *
 * `spaceId`, `descriptorHash`, and `pinnedHash` are threaded through
 * verbatim so the caller (logger / TerminalUI / strict-mode envelope)
 * has everything it needs to format the warning message without
 * re-reading the descriptor or the pinned file.
 */
export type SpaceContractDriftResult = {
  /** Drift verdict — see the per-variant notes above. */
  readonly kind: 'noDrift' | 'firstEmit' | 'drift';
  /** Space the verdict applies to (threaded through verbatim). */
  readonly spaceId: string;
  /** Descriptor-side hash (threaded through verbatim). */
  readonly descriptorHash: string;
  /** Pinned-side hash, or `null` in the `firstEmit` case. */
  readonly pinnedHash: string | null;
};
|
|
49
|
+
|
|
50
|
+
/**
|
|
51
|
+
* Pure drift-detection primitive for a single contract space.
|
|
52
|
+
*
|
|
53
|
+
* Runs once per loaded extension space, just before computing the
|
|
54
|
+
* `priorContract` that feeds {@link import('./plan-all-spaces').planAllSpaces}.
|
|
55
|
+
* Hash equality is byte-for-byte (no normalisation) — both sides are
|
|
56
|
+
* already canonical hashes produced by the same pipeline, so any
|
|
57
|
+
* difference is meaningful drift.
|
|
58
|
+
*
|
|
59
|
+
* Synchronous, pure, no I/O. The caller (SQL family in M2 R1) reads
|
|
60
|
+
* the pinned `contract.json` and computes its hash, then invokes this
|
|
61
|
+
* helper alongside the descriptor's `headRef.hash`. Composes naturally
|
|
62
|
+
* with {@link import('./read-pinned-contract-hash').readPinnedContractHash}
|
|
63
|
+
* which provides the read-side primitive.
|
|
64
|
+
*
|
|
65
|
+
* @see specs/framework-mechanism.spec.md § 3 — Drift detection (T1.9).
|
|
66
|
+
* @see specs/framework-mechanism.spec.md AM7 — drift warning surfaces
|
|
67
|
+
* the extension name and the diff direction.
|
|
68
|
+
*/
|
|
69
|
+
export function detectSpaceContractDrift(
|
|
70
|
+
spaceId: string,
|
|
71
|
+
inputs: DetectSpaceContractDriftInputs,
|
|
72
|
+
): SpaceContractDriftResult {
|
|
73
|
+
if (inputs.pinnedHash === null) {
|
|
74
|
+
return {
|
|
75
|
+
kind: 'firstEmit',
|
|
76
|
+
spaceId,
|
|
77
|
+
descriptorHash: inputs.descriptorHash,
|
|
78
|
+
pinnedHash: null,
|
|
79
|
+
};
|
|
80
|
+
}
|
|
81
|
+
if (inputs.descriptorHash === inputs.pinnedHash) {
|
|
82
|
+
return {
|
|
83
|
+
kind: 'noDrift',
|
|
84
|
+
spaceId,
|
|
85
|
+
descriptorHash: inputs.descriptorHash,
|
|
86
|
+
pinnedHash: inputs.pinnedHash,
|
|
87
|
+
};
|
|
88
|
+
}
|
|
89
|
+
return {
|
|
90
|
+
kind: 'drift',
|
|
91
|
+
spaceId,
|
|
92
|
+
descriptorHash: inputs.descriptorHash,
|
|
93
|
+
pinnedHash: inputs.pinnedHash,
|
|
94
|
+
};
|
|
95
|
+
}
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
import { mkdir, writeFile } from 'node:fs/promises';
|
|
2
|
+
import { join } from 'pathe';
|
|
3
|
+
import { canonicalizeJson } from './canonicalize-json';
|
|
4
|
+
import { errorPinnedArtefactsAppSpace } from './errors';
|
|
5
|
+
import { APP_SPACE_ID, assertValidSpaceId } from './space-layout';
|
|
6
|
+
|
|
7
|
+
/**
 * Pinned head reference for a contract space — `(hash, invariants)`.
 * Mirrors {@link import('./refs').RefEntry} but is redeclared locally so
 * callers can construct the input without depending on the refs module.
 */
export interface PinnedSpaceHeadRef {
  /** Contract hash the space is pinned at. */
  readonly hash: string;
  /** Invariant names pinned alongside the hash; the emitter sorts them before serialisation. */
  readonly invariants: readonly string[];
}
|
|
16
|
+
|
|
17
|
+
/**
 * Inputs for {@link emitPinnedSpaceArtefacts}.
 *
 * - `contract` is the canonical contract value the framework just emitted
 *   for the space; it is serialised through {@link canonicalizeJson}, so
 *   it must be a JSON-compatible value (objects / arrays / primitives).
 *   Typed as `unknown` rather than the SQL-family `Contract<SqlStorage>`
 *   to keep `migration-tools` framework-neutral; SQL-family callers pass
 *   their typed value through unchanged.
 *
 * - `contractDts` is the pre-rendered `.d.ts` text. Rendering happens in
 *   the SQL family (which owns the codec / typemap input the renderer
 *   needs), so this helper accepts the text verbatim and writes it out
 *   without further transformation.
 *
 * - `headRef` is the pinned head reference for the space.
 *   `invariants` are sorted alphabetically before serialisation so two
 *   callers passing the same set in different orders produce
 *   byte-identical `refs/head.json`.
 */
export interface PinnedSpaceArtefactInputs {
  /** JSON-compatible contract value; serialised via `canonicalizeJson`. */
  readonly contract: unknown;
  /** Pre-rendered `.d.ts` text, written out verbatim. */
  readonly contractDts: string;
  /** Pinned head reference `(hash, invariants)` for the space. */
  readonly headRef: PinnedSpaceHeadRef;
}
|
|
42
|
+
|
|
43
|
+
/**
|
|
44
|
+
* Emit the pinned per-space artefacts (`contract.json`, `contract.d.ts`,
|
|
45
|
+
* `refs/head.json`) under `<projectMigrationsDir>/<spaceId>/`.
|
|
46
|
+
*
|
|
47
|
+
* Always-overwrite: the framework owns these files; running `migrate`
|
|
48
|
+
* twice with the same inputs is a no-op observably (idempotent), but the
|
|
49
|
+
* helper does not check pre-existing contents — re-emit always wins.
|
|
50
|
+
*
|
|
51
|
+
* Path layout matches the convention in
|
|
52
|
+
* [`spaceMigrationDirectory`](./space-layout.ts), with two restrictions
|
|
53
|
+
* specific to pinned artefacts:
|
|
54
|
+
*
|
|
55
|
+
* - Rejects the app space (`spaceId === APP_SPACE_ID`): the app space's
|
|
56
|
+
* canonical `contract.json` lives at the project root, not under
|
|
57
|
+
* `migrations/`. Callers that want to emit it use the app-space
|
|
58
|
+
* contract emit pipeline.
|
|
59
|
+
* - Validates `spaceId` against `[a-z][a-z0-9_-]{0,63}` via
|
|
60
|
+
* {@link assertValidSpaceId} for the same filesystem-safety reasons.
|
|
61
|
+
*
|
|
62
|
+
* The migrations directory and space subdirectory are created if they
|
|
63
|
+
* do not yet exist (`mkdir { recursive: true }`).
|
|
64
|
+
*
|
|
65
|
+
* @see specs/framework-mechanism.spec.md § 3 — Pinned artefact emission (T1.8).
|
|
66
|
+
*/
|
|
67
|
+
export async function emitPinnedSpaceArtefacts(
|
|
68
|
+
projectMigrationsDir: string,
|
|
69
|
+
spaceId: string,
|
|
70
|
+
inputs: PinnedSpaceArtefactInputs,
|
|
71
|
+
): Promise<void> {
|
|
72
|
+
if (spaceId === APP_SPACE_ID) {
|
|
73
|
+
throw errorPinnedArtefactsAppSpace();
|
|
74
|
+
}
|
|
75
|
+
assertValidSpaceId(spaceId);
|
|
76
|
+
|
|
77
|
+
const dir = join(projectMigrationsDir, spaceId);
|
|
78
|
+
await mkdir(join(dir, 'refs'), { recursive: true });
|
|
79
|
+
|
|
80
|
+
await writeFile(join(dir, 'contract.json'), `${canonicalizeJson(inputs.contract)}\n`);
|
|
81
|
+
await writeFile(join(dir, 'contract.d.ts'), inputs.contractDts);
|
|
82
|
+
|
|
83
|
+
const sortedInvariants = [...inputs.headRef.invariants].sort();
|
|
84
|
+
const headJson = canonicalizeJson({
|
|
85
|
+
hash: inputs.headRef.hash,
|
|
86
|
+
invariants: sortedInvariants,
|
|
87
|
+
});
|
|
88
|
+
await writeFile(join(dir, 'refs', 'head.json'), `${headJson}\n`);
|
|
89
|
+
}
|
package/src/errors.ts
CHANGED
|
@@ -148,6 +148,41 @@ export function errorInvalidDestName(destName: string): MigrationToolsError {
|
|
|
148
148
|
});
|
|
149
149
|
}
|
|
150
150
|
|
|
151
|
+
export function errorInvalidSpaceId(spaceId: string): MigrationToolsError {
|
|
152
|
+
return new MigrationToolsError(
|
|
153
|
+
'MIGRATION.INVALID_SPACE_ID',
|
|
154
|
+
'Invalid contract space identifier',
|
|
155
|
+
{
|
|
156
|
+
why: `The space id "${spaceId}" does not match the required pattern /^[a-z][a-z0-9_-]{0,63}$/. Space ids are used as filesystem directory names under \`migrations/\`, so the pattern is conservative on purpose.`,
|
|
157
|
+
fix: 'Pick a lowercase identifier that begins with a letter and contains only lowercase letters, digits, hyphens, or underscores; max 64 characters total.',
|
|
158
|
+
details: { spaceId },
|
|
159
|
+
},
|
|
160
|
+
);
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
export function errorPinnedArtefactsAppSpace(): MigrationToolsError {
|
|
164
|
+
return new MigrationToolsError(
|
|
165
|
+
'MIGRATION.PINNED_ARTEFACTS_APP_SPACE',
|
|
166
|
+
'Pinned per-space artefacts do not apply to the app space',
|
|
167
|
+
{
|
|
168
|
+
why: "Pinned `contract.json`/`contract.d.ts`/`refs/head.json` files only exist for extension spaces under `migrations/<space-id>/`. The app space's canonical contract lives at the project root (`contract.json`) — `emitPinnedSpaceArtefacts` is the wrong helper for it.",
|
|
169
|
+
fix: 'Pass an extension space id, or use the app-space contract emit pipeline for the project-root `contract.json` / `contract.d.ts`.',
|
|
170
|
+
},
|
|
171
|
+
);
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
export function errorDuplicateSpaceId(spaceId: string): MigrationToolsError {
|
|
175
|
+
return new MigrationToolsError(
|
|
176
|
+
'MIGRATION.DUPLICATE_SPACE_ID',
|
|
177
|
+
'Duplicate contract space identifier',
|
|
178
|
+
{
|
|
179
|
+
why: `The space id "${spaceId}" appears more than once in the per-space planner input. Each space id must be unique across the inputs (the per-space planner emits one output entry per id).`,
|
|
180
|
+
fix: 'Deduplicate the inputs before passing them to `planAllSpaces` — typically by checking your `extensionPacks` declaration for repeated entries.',
|
|
181
|
+
details: { spaceId },
|
|
182
|
+
},
|
|
183
|
+
);
|
|
184
|
+
}
|
|
185
|
+
|
|
151
186
|
export function errorSameSourceAndTarget(dir: string, hash: string): MigrationToolsError {
|
|
152
187
|
const dirName = basename(dir);
|
|
153
188
|
return new MigrationToolsError(
|
package/src/exports/io.ts
CHANGED
package/src/exports/package.ts
CHANGED
|
@@ -1 +1,2 @@
|
|
|
1
|
-
export type {
|
|
1
|
+
export type { MigrationPackage } from '@prisma-next/framework-components/control';
|
|
2
|
+
export type { MigrationOps, OnDiskMigrationPackage } from '../package';
|