@prisma-next/migration-tools 0.5.0-dev.8 → 0.5.0-dev.81
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +34 -22
- package/dist/{constants-BRi0X7B_.mjs → constants-DWV9_o2Z.mjs} +2 -2
- package/dist/{constants-BRi0X7B_.mjs.map → constants-DWV9_o2Z.mjs.map} +1 -1
- package/dist/errors-EPL_9p9f.mjs +297 -0
- package/dist/errors-EPL_9p9f.mjs.map +1 -0
- package/dist/exports/aggregate.d.mts +614 -0
- package/dist/exports/aggregate.d.mts.map +1 -0
- package/dist/exports/aggregate.mjs +611 -0
- package/dist/exports/aggregate.mjs.map +1 -0
- package/dist/exports/constants.d.mts.map +1 -1
- package/dist/exports/constants.mjs +2 -3
- package/dist/exports/errors.d.mts +68 -0
- package/dist/exports/errors.d.mts.map +1 -0
- package/dist/exports/errors.mjs +2 -0
- package/dist/exports/graph.d.mts +2 -0
- package/dist/exports/graph.mjs +1 -0
- package/dist/exports/hash.d.mts +52 -0
- package/dist/exports/hash.d.mts.map +1 -0
- package/dist/exports/hash.mjs +2 -0
- package/dist/exports/invariants.d.mts +34 -0
- package/dist/exports/invariants.d.mts.map +1 -0
- package/dist/exports/invariants.mjs +2 -0
- package/dist/exports/io.d.mts +66 -6
- package/dist/exports/io.d.mts.map +1 -1
- package/dist/exports/io.mjs +2 -3
- package/dist/exports/metadata.d.mts +2 -0
- package/dist/exports/metadata.mjs +1 -0
- package/dist/exports/migration-graph.d.mts +2 -0
- package/dist/exports/migration-graph.mjs +2 -0
- package/dist/exports/migration-ts.d.mts.map +1 -1
- package/dist/exports/migration-ts.mjs +2 -4
- package/dist/exports/migration-ts.mjs.map +1 -1
- package/dist/exports/migration.d.mts +15 -14
- package/dist/exports/migration.d.mts.map +1 -1
- package/dist/exports/migration.mjs +70 -43
- package/dist/exports/migration.mjs.map +1 -1
- package/dist/exports/package.d.mts +3 -0
- package/dist/exports/package.mjs +1 -0
- package/dist/exports/refs.d.mts.map +1 -1
- package/dist/exports/refs.mjs +3 -4
- package/dist/exports/refs.mjs.map +1 -1
- package/dist/exports/spaces.d.mts +550 -0
- package/dist/exports/spaces.d.mts.map +1 -0
- package/dist/exports/spaces.mjs +223 -0
- package/dist/exports/spaces.mjs.map +1 -0
- package/dist/graph-HMWAldoR.d.mts +28 -0
- package/dist/graph-HMWAldoR.d.mts.map +1 -0
- package/dist/hash-By50zM_E.mjs +74 -0
- package/dist/hash-By50zM_E.mjs.map +1 -0
- package/dist/invariants-Duc8f9NM.mjs +52 -0
- package/dist/invariants-Duc8f9NM.mjs.map +1 -0
- package/dist/io-D13dLvUh.mjs +239 -0
- package/dist/io-D13dLvUh.mjs.map +1 -0
- package/dist/metadata-CFvm3ayn.d.mts +2 -0
- package/dist/migration-graph-DGNnKDY5.mjs +523 -0
- package/dist/migration-graph-DGNnKDY5.mjs.map +1 -0
- package/dist/migration-graph-DulOITvG.d.mts +124 -0
- package/dist/migration-graph-DulOITvG.d.mts.map +1 -0
- package/dist/op-schema-D5qkXfEf.mjs +13 -0
- package/dist/op-schema-D5qkXfEf.mjs.map +1 -0
- package/dist/package-BjiZ7KDy.d.mts +21 -0
- package/dist/package-BjiZ7KDy.d.mts.map +1 -0
- package/dist/read-contract-space-contract-C3-1eyaI.mjs +298 -0
- package/dist/read-contract-space-contract-C3-1eyaI.mjs.map +1 -0
- package/package.json +42 -17
- package/src/aggregate/loader.ts +409 -0
- package/src/aggregate/marker-types.ts +16 -0
- package/src/aggregate/planner-types.ts +171 -0
- package/src/aggregate/planner.ts +158 -0
- package/src/aggregate/project-schema-to-space.ts +64 -0
- package/src/aggregate/strategies/graph-walk.ts +118 -0
- package/src/aggregate/strategies/synth.ts +122 -0
- package/src/aggregate/types.ts +89 -0
- package/src/aggregate/verifier.ts +230 -0
- package/src/assert-descriptor-self-consistency.ts +70 -0
- package/src/compute-extension-space-apply-path.ts +152 -0
- package/src/concatenate-space-apply-inputs.ts +90 -0
- package/src/detect-space-contract-drift.ts +91 -0
- package/src/emit-contract-space-artefacts.ts +70 -0
- package/src/errors.ts +251 -17
- package/src/exports/aggregate.ts +42 -0
- package/src/exports/errors.ts +8 -0
- package/src/exports/graph.ts +1 -0
- package/src/exports/hash.ts +2 -0
- package/src/exports/invariants.ts +1 -0
- package/src/exports/io.ts +3 -1
- package/src/exports/metadata.ts +1 -0
- package/src/exports/{dag.ts → migration-graph.ts} +3 -2
- package/src/exports/migration.ts +0 -1
- package/src/exports/package.ts +2 -0
- package/src/exports/spaces.ts +49 -0
- package/src/gather-disk-contract-space-state.ts +62 -0
- package/src/graph-ops.ts +57 -30
- package/src/graph.ts +25 -0
- package/src/hash.ts +91 -0
- package/src/invariants.ts +56 -0
- package/src/io.ts +163 -40
- package/src/metadata.ts +1 -0
- package/src/migration-base.ts +97 -56
- package/src/migration-graph.ts +676 -0
- package/src/op-schema.ts +11 -0
- package/src/package.ts +21 -0
- package/src/plan-all-spaces.ts +76 -0
- package/src/read-contract-space-contract.ts +44 -0
- package/src/read-contract-space-head-ref.ts +63 -0
- package/src/space-layout.ts +48 -0
- package/src/verify-contract-spaces.ts +272 -0
- package/dist/attestation-BnzTb0Qp.mjs +0 -65
- package/dist/attestation-BnzTb0Qp.mjs.map +0 -1
- package/dist/errors-BmiSgz1j.mjs +0 -160
- package/dist/errors-BmiSgz1j.mjs.map +0 -1
- package/dist/exports/attestation.d.mts +0 -37
- package/dist/exports/attestation.d.mts.map +0 -1
- package/dist/exports/attestation.mjs +0 -4
- package/dist/exports/dag.d.mts +0 -51
- package/dist/exports/dag.d.mts.map +0 -1
- package/dist/exports/dag.mjs +0 -386
- package/dist/exports/dag.mjs.map +0 -1
- package/dist/exports/types.d.mts +0 -35
- package/dist/exports/types.d.mts.map +0 -1
- package/dist/exports/types.mjs +0 -3
- package/dist/io-Cd6GLyjK.mjs +0 -153
- package/dist/io-Cd6GLyjK.mjs.map +0 -1
- package/dist/types-DyGXcWWp.d.mts +0 -71
- package/dist/types-DyGXcWWp.d.mts.map +0 -1
- package/src/attestation.ts +0 -81
- package/src/dag.ts +0 -426
- package/src/exports/attestation.ts +0 -2
- package/src/exports/types.ts +0 -10
- package/src/types.ts +0 -66
|
@@ -0,0 +1,223 @@
|
|
|
1
|
+
import { r as errorDescriptorHeadHashMismatch, s as errorDuplicateSpaceId } from "../errors-EPL_9p9f.mjs";
|
|
2
|
+
import { r as canonicalizeJson } from "../hash-By50zM_E.mjs";
|
|
3
|
+
import { s as readMigrationsDir } from "../io-D13dLvUh.mjs";
|
|
4
|
+
import "../constants-DWV9_o2Z.mjs";
|
|
5
|
+
import { l as reconstructGraph, o as findPathWithDecision } from "../migration-graph-DGNnKDY5.mjs";
|
|
6
|
+
import { a as readContractSpaceHeadRef, c as isValidSpaceId, i as detectSpaceContractDrift, l as spaceMigrationDirectory, n as listContractSpaceDirectories, o as APP_SPACE_ID, r as verifyContractSpaces, s as assertValidSpaceId, t as readContractSpaceContract } from "../read-contract-space-contract-C3-1eyaI.mjs";
|
|
7
|
+
import { join } from "pathe";
|
|
8
|
+
import { mkdir, writeFile } from "node:fs/promises";
|
|
9
|
+
import { computeStorageHash } from "@prisma-next/contract/hashing";
|
|
10
|
+
//#region src/assert-descriptor-self-consistency.ts
|
|
11
|
+
/**
 * Assert that an extension descriptor is self-consistent: the
 * `headRef.hash` it publishes must equal the canonical hash recomputed
 * from its own contract storage via {@link computeStorageHash} over
 * `(target, targetFamily, storage)`.
 *
 * The published `storage.storageHash` is stripped before recomputing:
 * the original hash was produced over a storage object that did not yet
 * carry that field, so feeding it back in would change the digest.
 *
 * Synchronous, pure, no I/O. Throws the
 * `MIGRATION.DESCRIPTOR_HEAD_HASH_MISMATCH` error (with both hashes in
 * its details) when the descriptor's published hash is stale.
 */
function assertDescriptorSelfConsistency(inputs) {
  const { extensionId, target, targetFamily, storage, headRefHash } = inputs;
  // Drop the descriptor-published storageHash so the recomputation sees
  // the same canonical shape the authoring pipeline hashed.
  const { storageHash: _ignored, ...bareStorage } = storage;
  const freshHash = computeStorageHash({
    target,
    targetFamily,
    storage: bareStorage
  });
  if (freshHash === headRefHash) return;
  throw errorDescriptorHeadHashMismatch({
    extensionId,
    recomputedHash: freshHash,
    headRefHash
  });
}
|
|
43
|
+
//#endregion
|
|
44
|
+
//#region src/compute-extension-space-apply-path.ts
|
|
45
|
+
/**
 * Compute the apply path for an extension contract space: the sequence
 * of on-disk migration packages that walks the live marker forward to
 * the recorded head ref hash while covering every required invariant.
 *
 * Reads only on-disk artefacts (`refs/head.json` plus the per-space
 * migration packages) — it never imports an extension descriptor module.
 *
 * Outcomes:
 * - `{ kind: 'ok', ... }` with the ops, walked directory names and the
 *   sorted union of invariants provided along the path (empty path when
 *   the marker already sits at the head).
 * - `{ kind: 'unreachable' }` when the marker hash is not structurally
 *   connected to the head in the reconstructed graph.
 * - `{ kind: 'unsatisfiable', missing, structuralPath }` when a
 *   structural path exists but required invariants stay uncovered.
 * - `{ kind: 'contractSpaceHeadRefMissing' }` when the per-space
 *   `refs/head.json` is absent (defensive; the precheck verifier should
 *   already have rejected this).
 */
async function computeExtensionSpaceApplyPath(inputs) {
  const { projectMigrationsDir, spaceId, currentMarkerHash, currentMarkerInvariants } = inputs;
  const headRef = await readContractSpaceHeadRef(projectMigrationsDir, spaceId);
  if (headRef === null) return { kind: "contractSpaceHeadRefMissing" };
  const spaceDir = spaceMigrationDirectory(projectMigrationsDir, spaceId);
  const diskPackages = await readMigrationsDir(spaceDir);
  const migrationGraph = reconstructGraph(diskPackages);
  // A null marker hash means "no prior state": start the walk from the
  // empty-contract sentinel so the baseline migration is reachable.
  const startHash = currentMarkerHash ?? "sha256:empty";
  const stillRequired = new Set(headRef.invariants.filter((invariantId) => !currentMarkerInvariants.includes(invariantId)));
  const outcome = findPathWithDecision(migrationGraph, startHash, headRef.hash, { required: stillRequired });
  if (outcome.kind === "unreachable") {
    return {
      kind: "unreachable",
      contractSpaceHeadRef: headRef
    };
  }
  if (outcome.kind === "unsatisfiable") {
    const structuralPath = outcome.structuralPath.map((step) => ({
      dirName: step.dirName,
      to: step.to
    }));
    return {
      kind: "unsatisfiable",
      contractSpaceHeadRef: headRef,
      missing: outcome.missing,
      structuralPath
    };
  }
  // Index packages by migration hash so each selected edge resolves to
  // its on-disk package in O(1).
  const byHash = new Map();
  for (const candidate of diskPackages) byHash.set(candidate.metadata.migrationHash, candidate);
  const pathOps = [];
  const walkedMigrationDirs = [];
  const coveredInvariants = new Set();
  for (const edge of outcome.decision.selectedPath) {
    const pkg = byHash.get(edge.migrationHash);
    // Edges come from the same package set, so a miss means the graph is
    // internally inconsistent — fail loudly rather than truncate.
    if (pkg === undefined) {
      throw new Error(`Migration package missing for edge ${edge.migrationHash} in space "${spaceId}"`);
    }
    walkedMigrationDirs.push(pkg.dirName);
    pathOps.push(...pkg.ops);
    for (const invariantId of pkg.metadata.providedInvariants) coveredInvariants.add(invariantId);
  }
  return {
    kind: "ok",
    contractSpaceHeadRef: headRef,
    providedInvariants: Array.from(coveredInvariants).sort(),
    pathOps,
    walkedMigrationDirs
  };
}
|
|
109
|
+
//#endregion
|
|
110
|
+
//#region src/emit-contract-space-artefacts.ts
|
|
111
|
+
/**
 * Emit the per-space artefacts (`contract.json`, `contract.d.ts`,
 * `refs/head.json`) under `<projectMigrationsDir>/<spaceId>/`.
 *
 * Always-overwrite: the framework owns these files, so re-emitting with
 * identical inputs is observably a no-op, but pre-existing contents are
 * never inspected. The space id is validated via
 * {@link assertValidSpaceId} for filesystem safety; all spaces —
 * including the app space — are handled uniformly. Missing directories
 * are created with `mkdir { recursive: true }`.
 *
 * Head-ref invariants are sorted before serialisation so two callers
 * passing the same set in different orders produce byte-identical
 * `refs/head.json`.
 */
async function emitContractSpaceArtefacts(projectMigrationsDir, spaceId, inputs) {
  assertValidSpaceId(spaceId);
  const spaceDir = join(projectMigrationsDir, spaceId);
  const refsDir = join(spaceDir, "refs");
  await mkdir(refsDir, { recursive: true });
  await writeFile(join(spaceDir, "contract.json"), `${canonicalizeJson(inputs.contract)}\n`);
  await writeFile(join(spaceDir, "contract.d.ts"), inputs.contractDts);
  const headJson = canonicalizeJson({
    hash: inputs.headRef.hash,
    invariants: [...inputs.headRef.invariants].sort()
  });
  await writeFile(join(refsDir, "head.json"), `${headJson}\n`);
}
|
|
142
|
+
//#endregion
|
|
143
|
+
//#region src/gather-disk-contract-space-state.ts
|
|
144
|
+
/**
 * Read the on-disk state the per-space verifier needs:
 *
 * - the contract-space directory names under `<projectMigrationsDir>/`
 *   (via {@link listContractSpaceDirectories}), and
 * - the on-disk head ref `(hash, invariants)` for each declared
 *   extension space (via {@link readContractSpaceHeadRef}; spaces whose
 *   artefacts are missing are simply omitted from the map).
 *
 * Only extension spaces are read — the app space id is skipped. Touches
 * only the user's repo filesystem; never imports an extension
 * descriptor module.
 */
async function gatherDiskContractSpaceState(args) {
  const { projectMigrationsDir, loadedSpaceIds } = args;
  const spaceDirsOnDisk = await listContractSpaceDirectories(projectMigrationsDir);
  const headRefsBySpace = new Map();
  for (const spaceId of loadedSpaceIds) {
    // The app space carries no per-space head ref; skip it.
    if (spaceId !== APP_SPACE_ID) {
      const headRef = await readContractSpaceHeadRef(projectMigrationsDir, spaceId);
      if (headRef === null) continue;
      headRefsBySpace.set(spaceId, headRef);
    }
  }
  return {
    spaceDirsOnDisk,
    headRefsBySpace
  };
}
|
|
176
|
+
//#endregion
|
|
177
|
+
//#region src/plan-all-spaces.ts
|
|
178
|
+
/**
 * Run the per-space planner across a set of loaded contract spaces and
 * return a deterministic result regardless of declaration order.
 *
 * - Output entries are sorted alphabetically by `spaceId`, so two
 *   callers passing the same inputs in different orders observe
 *   byte-identical outputs.
 * - `planSpace` is invoked exactly once per input, in
 *   alphabetical-by-spaceId order; its return value becomes the entry's
 *   `migrationPackages` verbatim.
 * - Duplicate `spaceId`s throw `MIGRATION.DUPLICATE_SPACE_ID` before
 *   any `planSpace` call runs.
 *
 * Synchronous; callers resolve any async I/O before invoking this and
 * pass the materialised inputs through.
 */
function planAllSpaces(inputs, planSpace) {
  // Reject duplicates up front so no planner runs on malformed input.
  const seenIds = new Set();
  for (const entry of inputs) {
    if (seenIds.has(entry.spaceId)) throw errorDuplicateSpaceId(entry.spaceId);
    seenIds.add(entry.spaceId);
  }
  // Copy before sorting — the caller's array is never mutated.
  const ordered = Array.from(inputs);
  ordered.sort((left, right) => {
    if (left.spaceId === right.spaceId) return 0;
    return left.spaceId < right.spaceId ? -1 : 1;
  });
  return ordered.map((entry) => ({
    spaceId: entry.spaceId,
    migrationPackages: planSpace(entry)
  }));
}
|
|
220
|
+
//#endregion
|
|
221
|
+
// Public surface of the spaces entrypoint: the five orchestration
// helpers defined in this bundle plus the space-layout / head-ref /
// verification helpers re-exported from the shared chunks imported above.
export { APP_SPACE_ID, assertDescriptorSelfConsistency, assertValidSpaceId, computeExtensionSpaceApplyPath, detectSpaceContractDrift, emitContractSpaceArtefacts, gatherDiskContractSpaceState, isValidSpaceId, listContractSpaceDirectories, planAllSpaces, readContractSpaceContract, readContractSpaceHeadRef, spaceMigrationDirectory, verifyContractSpaces };
|
|
222
|
+
|
|
223
|
+
//# sourceMappingURL=spaces.mjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"spaces.mjs","names":[],"sources":["../../src/assert-descriptor-self-consistency.ts","../../src/compute-extension-space-apply-path.ts","../../src/emit-contract-space-artefacts.ts","../../src/gather-disk-contract-space-state.ts","../../src/plan-all-spaces.ts"],"sourcesContent":["import { computeStorageHash } from '@prisma-next/contract/hashing';\nimport { errorDescriptorHeadHashMismatch } from './errors';\n\n/**\n * Inputs the helper needs to recompute the descriptor's storage hash and\n * compare it to the published `headRef.hash`. Kept structural so the SQL\n * family (and any future target family) can compose the check without\n * coupling to its own descriptor types.\n */\nexport interface DescriptorSelfConsistencyInputs {\n readonly extensionId: string;\n readonly target: string;\n readonly targetFamily: string;\n /**\n * Family-specific storage object. Typed as `unknown` so callers can\n * pass their own narrow storage shape (e.g. `SqlStorage`) without an\n * inline cast — the helper canonicalises through `JSON.stringify`\n * inside {@link computeStorageHash} and only requires a plain\n * record-shaped value at runtime.\n */\n readonly storage: unknown;\n readonly headRefHash: string;\n}\n\n/**\n * Assert that an extension descriptor is self-consistent: the\n * `headRef.hash` it publishes must match the canonical hash recomputed\n * from its `contractSpace.contractJson`.\n *\n * Recomputes via {@link computeStorageHash} — the same canonical-JSON\n * pipeline the descriptor's own emit pipeline produced the hash with —\n * over `(target, targetFamily, storage)`. 
Mismatch indicates the\n * extension author bumped `contractJson` without rerunning emit, leaving\n * the descriptor's `headRef.hash` stale; the consumer-side helpers\n * (drift detection, on-disk artefact emission, runner marker writes) all\n * trust `headRef.hash` as the canonical identity, so a stale value would\n * silently corrupt every downstream boundary.\n *\n * Synchronous, pure, no I/O. Throws\n * `MIGRATION.DESCRIPTOR_HEAD_HASH_MISMATCH` on failure with both the\n * recomputed and published hashes in `details` so callers can surface a\n * clear remediation hint without re-deriving them.\n */\nexport function assertDescriptorSelfConsistency(inputs: DescriptorSelfConsistencyInputs): void {\n // The published `storage.storageHash` is the *output* of the production\n // emit pipeline's `computeStorageHash` call, computed over a storage\n // object that did not yet carry `storageHash`. Recomputing against the\n // published storage as-is would feed the result back into its own input\n // and produce a different digest. 
Strip `storageHash` before\n // recomputing so the helper sees the same canonical shape the\n // descriptor's authoring pipeline saw.\n // The helper requires only a plain record-shaped storage value at\n // runtime; a single cast here keeps the public input type\n // family-agnostic (`unknown`) while still letting us strip the\n // descriptor-published `storageHash` before re-canonicalising.\n const storageRecord = inputs.storage as Record<string, unknown>;\n const { storageHash: _stripped, ...storageWithoutHash } = storageRecord;\n const recomputed = computeStorageHash({\n target: inputs.target,\n targetFamily: inputs.targetFamily,\n storage: storageWithoutHash,\n });\n if (recomputed !== inputs.headRefHash) {\n throw errorDescriptorHeadHashMismatch({\n extensionId: inputs.extensionId,\n recomputedHash: recomputed,\n headRefHash: inputs.headRefHash,\n });\n }\n}\n","import { EMPTY_CONTRACT_HASH } from './constants';\nimport { readMigrationsDir } from './io';\nimport { findPathWithDecision, reconstructGraph } from './migration-graph';\nimport type { MigrationOps } from './package';\nimport {\n type ContractSpaceHeadRef,\n readContractSpaceHeadRef,\n} from './read-contract-space-head-ref';\nimport { spaceMigrationDirectory } from './space-layout';\n\n/**\n * Outcome of {@link computeExtensionSpaceApplyPath} — a discriminated union\n * mirroring {@link import('./migration-graph').FindPathOutcome} so callers\n * can map structural / invariant failures to their preferred CLI envelope\n * without re-running pathfinding.\n */\nexport type ExtensionSpaceApplyPathOutcome =\n | {\n readonly kind: 'ok';\n readonly contractSpaceHeadRef: ContractSpaceHeadRef;\n /**\n * Sorted, deduplicated invariant ids covered by the walked path.\n * Mirrors the on-disk `providedInvariants` summed across edges and\n * canonicalised — what the runner stamps on the marker after apply.\n */\n readonly providedInvariants: readonly string[];\n /**\n * Path operations in apply order. 
Empty when the marker is already\n * at the recorded head (no-op).\n */\n readonly pathOps: MigrationOps;\n /**\n * Migration directory names walked, in order. Mirrors `pathOps`'s\n * structure but at the package granularity — useful for surfacing\n * \"applied N migration(s)\" messages.\n */\n readonly walkedMigrationDirs: readonly string[];\n }\n | { readonly kind: 'unreachable'; readonly contractSpaceHeadRef: ContractSpaceHeadRef }\n | {\n readonly kind: 'unsatisfiable';\n readonly contractSpaceHeadRef: ContractSpaceHeadRef;\n readonly missing: readonly string[];\n readonly structuralPath: readonly { readonly dirName: string; readonly to: string }[];\n }\n | { readonly kind: 'contractSpaceHeadRefMissing' };\n\n/**\n * Inputs to {@link computeExtensionSpaceApplyPath}. The helper is\n * deliberately framework-neutral and consumes only on-disk state:\n *\n * - `projectMigrationsDir` is the project's top-level `migrations/` dir.\n * - `spaceId` selects the per-space subdirectory under it.\n * - `currentMarkerHash` / `currentMarkerInvariants` come from the live\n * marker row keyed by `space = <spaceId>`. `null` hash = no marker yet\n * (the pathfinder treats this as the empty-contract sentinel per ADR\n * 208).\n */\nexport interface ComputeExtensionSpaceApplyPathInputs {\n readonly projectMigrationsDir: string;\n readonly spaceId: string;\n readonly currentMarkerHash: string | null;\n readonly currentMarkerInvariants: readonly string[];\n}\n\n/**\n * Compute the apply path for an extension contract space — the shortest\n * sequence of on-disk migration packages that walks the live marker\n * forward to the on-disk head ref hash, covering every required\n * invariant.\n *\n * Reads only on-disk artefacts (`migrations/<spaceId>/refs/head.json`\n * and the per-space migration packages). 
**Does not import any\n * extension descriptor module** — `db init` / `db update` must remain\n * runnable without the descriptor source on disk.\n *\n * Behaviour:\n * - Returns `{ kind: 'ok', pathOps: [], … }` when the marker is already\n * at the recorded head and no required invariants are missing.\n * - Returns `{ kind: 'unreachable' }` when the marker hash is not\n * structurally connected to the recorded head in the graph.\n * - Returns `{ kind: 'unsatisfiable', missing, … }` when the marker is\n * reachable but no path covers the required invariants.\n * - Returns `{ kind: 'contractSpaceHeadRefMissing' }` when the per-space\n * `refs/head.json` is absent — the precheck verifier should already\n * have rejected this case, but the helper is defensive so callers can\n * surface a coherent error rather than throw.\n */\nexport async function computeExtensionSpaceApplyPath(\n inputs: ComputeExtensionSpaceApplyPathInputs,\n): Promise<ExtensionSpaceApplyPathOutcome> {\n const { projectMigrationsDir, spaceId, currentMarkerHash, currentMarkerInvariants } = inputs;\n\n const contractSpaceHeadRef = await readContractSpaceHeadRef(projectMigrationsDir, spaceId);\n if (contractSpaceHeadRef === null) {\n return { kind: 'contractSpaceHeadRefMissing' };\n }\n\n const spaceDir = spaceMigrationDirectory(projectMigrationsDir, spaceId);\n const packages = await readMigrationsDir(spaceDir);\n const graph = reconstructGraph(packages);\n\n // Live-marker layer encodes \"no prior state\" as EMPTY_CONTRACT_HASH;\n // mirror the `migration apply` flow so a fresh-marker initial walk\n // hits the baseline migration whose `from` is EMPTY_CONTRACT_HASH.\n const fromHash = currentMarkerHash ?? 
EMPTY_CONTRACT_HASH;\n const required = new Set(\n contractSpaceHeadRef.invariants.filter((id) => !currentMarkerInvariants.includes(id)),\n );\n\n const outcome = findPathWithDecision(graph, fromHash, contractSpaceHeadRef.hash, { required });\n\n if (outcome.kind === 'unreachable') {\n return { kind: 'unreachable', contractSpaceHeadRef };\n }\n if (outcome.kind === 'unsatisfiable') {\n return {\n kind: 'unsatisfiable',\n contractSpaceHeadRef,\n missing: outcome.missing,\n structuralPath: outcome.structuralPath.map(({ dirName, to }) => ({ dirName, to })),\n };\n }\n\n const packagesByHash = new Map(packages.map((pkg) => [pkg.metadata.migrationHash, pkg]));\n\n const pathOps: MigrationOps[number][] = [];\n const walkedMigrationDirs: string[] = [];\n const providedInvariantsSet = new Set<string>();\n for (const edge of outcome.decision.selectedPath) {\n const pkg = packagesByHash.get(edge.migrationHash);\n if (!pkg) {\n // Path edges always come from the same `packages` array, so this\n // is only reachable when the graph is internally inconsistent —\n // surface it loudly rather than silently truncating the path.\n throw new Error(\n `Migration package missing for edge ${edge.migrationHash} in space \"${spaceId}\"`,\n );\n }\n walkedMigrationDirs.push(pkg.dirName);\n for (const op of pkg.ops) pathOps.push(op);\n for (const invariant of pkg.metadata.providedInvariants) providedInvariantsSet.add(invariant);\n }\n\n return {\n kind: 'ok',\n contractSpaceHeadRef,\n providedInvariants: [...providedInvariantsSet].sort(),\n pathOps,\n walkedMigrationDirs,\n };\n}\n","import { mkdir, writeFile } from 'node:fs/promises';\nimport { join } from 'pathe';\nimport { canonicalizeJson } from './canonicalize-json';\nimport type { ContractSpaceHeadRef } from './read-contract-space-head-ref';\nimport { assertValidSpaceId } from './space-layout';\n\n/**\n * Inputs for {@link emitContractSpaceArtefacts}.\n *\n * - `contract` is the canonical contract value the framework just emitted\n * 
for the space; it is serialised through {@link canonicalizeJson}, so\n * it must be a JSON-compatible value (objects / arrays / primitives).\n * Typed as `unknown` rather than the SQL-family `Contract<SqlStorage>`\n * to keep `migration-tools` framework-neutral; SQL-family callers pass\n * their typed value through unchanged.\n *\n * - `contractDts` is the pre-rendered `.d.ts` text. Rendering happens in\n * the SQL family (which owns the codec / typemap input the renderer\n * needs), so this helper accepts the text verbatim and writes it out\n * without further transformation.\n *\n * - `headRef` is the head reference for the space.\n * `invariants` are sorted alphabetically before serialisation so two\n * callers passing the same set in different orders produce\n * byte-identical `refs/head.json`.\n */\nexport interface ContractSpaceArtefactInputs {\n readonly contract: unknown;\n readonly contractDts: string;\n readonly headRef: ContractSpaceHeadRef;\n}\n\n/**\n * Emit the per-space artefacts (`contract.json`, `contract.d.ts`,\n * `refs/head.json`) under `<projectMigrationsDir>/<spaceId>/`.\n *\n * Always-overwrite: the framework owns these files; running `migrate`\n * twice with the same inputs is a no-op observably (idempotent), but the\n * helper does not check pre-existing contents — re-emit always wins.\n *\n * Path layout matches the convention in\n * [`spaceMigrationDirectory`](./space-layout.ts). 
The space id is\n * validated against `[a-z][a-z0-9_-]{0,63}` via\n * {@link assertValidSpaceId} for filesystem-safety reasons; the helper\n * accepts every space uniformly (including the app space, default\n * `'app'`).\n *\n * The migrations directory and space subdirectory are created if they\n * do not yet exist (`mkdir { recursive: true }`).\n */\nexport async function emitContractSpaceArtefacts(\n projectMigrationsDir: string,\n spaceId: string,\n inputs: ContractSpaceArtefactInputs,\n): Promise<void> {\n assertValidSpaceId(spaceId);\n\n const dir = join(projectMigrationsDir, spaceId);\n await mkdir(join(dir, 'refs'), { recursive: true });\n\n await writeFile(join(dir, 'contract.json'), `${canonicalizeJson(inputs.contract)}\\n`);\n await writeFile(join(dir, 'contract.d.ts'), inputs.contractDts);\n\n const sortedInvariants = [...inputs.headRef.invariants].sort();\n const headJson = canonicalizeJson({\n hash: inputs.headRef.hash,\n invariants: sortedInvariants,\n });\n await writeFile(join(dir, 'refs', 'head.json'), `${headJson}\\n`);\n}\n","import { readContractSpaceHeadRef } from './read-contract-space-head-ref';\nimport { APP_SPACE_ID } from './space-layout';\nimport {\n type ContractSpaceHeadRecord,\n listContractSpaceDirectories,\n} from './verify-contract-spaces';\n\n/**\n * Disk-side inputs to {@link import('./verify-contract-spaces').verifyContractSpaces}\n * — gathered without touching the live database. The caller composes\n * this with the marker rows it reads from the runtime to invoke the\n * verifier.\n */\nexport interface DiskContractSpaceState {\n /** Contract-space directory names observed under `<projectMigrationsDir>/`. */\n readonly spaceDirsOnDisk: readonly string[];\n /** Head-ref `(hash, invariants)` per extension space. 
*/\n readonly headRefsBySpace: ReadonlyMap<string, ContractSpaceHeadRecord>;\n}\n\n/**\n * Read the on-disk state the per-space verifier needs:\n *\n * - The list of contract-space directories under\n * `<projectMigrationsDir>/` (via\n * {@link import('./verify-contract-spaces').listContractSpaceDirectories}).\n * - The on-disk head ref `(hash, invariants)` for each declared extension space\n * (via {@link readContractSpaceHeadRef}; missing on-disk artefacts are simply\n * omitted — the verifier reports them as `declaredButUnmigrated`).\n *\n * Synchronous in spirit but async due to filesystem reads. Reads only\n * the user's repo. **Does not import any extension descriptor module.**\n *\n * Composition convention: pure target-agnostic primitive in\n * `1-framework`; the SQL family (and any future target family) wires\n * it into its `dbInit` / `verify` flows alongside its own marker-row\n * read before invoking `verifyContractSpaces`.\n */\nexport async function gatherDiskContractSpaceState(args: {\n readonly projectMigrationsDir: string;\n /**\n * Set of space ids the project declares: `'app'` plus each entry in\n * `extensionPacks` whose descriptor exposes a `contractSpace`. The\n * helper reads on-disk head data only for the extension members.\n */\n readonly loadedSpaceIds: ReadonlySet<string>;\n}): Promise<DiskContractSpaceState> {\n const { projectMigrationsDir, loadedSpaceIds } = args;\n\n const spaceDirsOnDisk = await listContractSpaceDirectories(projectMigrationsDir);\n\n const headRefsBySpace = new Map<string, ContractSpaceHeadRecord>();\n for (const spaceId of loadedSpaceIds) {\n if (spaceId === APP_SPACE_ID) continue;\n const head = await readContractSpaceHeadRef(projectMigrationsDir, spaceId);\n if (head !== null) {\n headRefsBySpace.set(spaceId, head);\n }\n }\n\n return { spaceDirsOnDisk, headRefsBySpace };\n}\n","import { errorDuplicateSpaceId } from './errors';\n\n/**\n * Per-space input for {@link planAllSpaces}. 
One entry per loaded\n * contract space (the application's `'app'` plus each extension that\n * exposes a `contractSpace`).\n *\n * - `priorContract` is `null` for a space that has never been emitted\n * (no `migrations/<space-id>/contract.json` on disk yet); otherwise it\n * is the canonical contract value emitted for that space.\n * - `newContract` is the canonical contract value the planner is about\n * to emit for that space — for app-space, the just-emitted root\n * `contract.json`; for an extension space, the descriptor's\n * `contractSpace.contractJson`.\n */\nexport interface SpacePlanInput<TContract> {\n readonly spaceId: string;\n readonly priorContract: TContract | null;\n readonly newContract: TContract;\n}\n\nexport interface SpacePlanOutput<TPackage> {\n readonly spaceId: string;\n readonly migrationPackages: readonly TPackage[];\n}\n\n/**\n * Iterate the per-space planner across a set of loaded contract spaces\n * and return a deterministic shape regardless of declaration order.\n *\n * Behaviour:\n *\n * - The output is sorted alphabetically by `spaceId`. Two callers\n * passing the same set of inputs in different orders observe\n * byte-identical outputs.\n * - The per-space planner (`planSpace`) is called exactly once per\n * input, in alphabetical-by-spaceId order. Its return value is\n * attached to the corresponding output entry verbatim.\n * - Duplicate `spaceId`s in the input array throw\n * `MIGRATION.DUPLICATE_SPACE_ID` before any `planSpace` call runs,\n * keeping the planner pure when the input is malformed.\n *\n * The signature is generic over `TContract` and `TPackage` because the\n * shape is framework-neutral (SQL family today, Mongo family\n * eventually). Callers wire in whatever contract value and migration\n * package shape their family already speaks.\n *\n * Synchronous: the underlying per-space planner (target's\n * `MigrationPlanner.plan(...)`) is synchronous; callers that need to\n * resolve async I/O (e.g. 
reading on-disk `contract.json` from disk)\n * resolve it before calling `planAllSpaces` and pass the materialised\n * inputs through.\n */\nexport function planAllSpaces<TContract, TPackage>(\n inputs: readonly SpacePlanInput<TContract>[],\n planSpace: (input: SpacePlanInput<TContract>) => readonly TPackage[],\n): readonly SpacePlanOutput<TPackage>[] {\n const seen = new Set<string>();\n for (const input of inputs) {\n if (seen.has(input.spaceId)) {\n throw errorDuplicateSpaceId(input.spaceId);\n }\n seen.add(input.spaceId);\n }\n\n const sorted = [...inputs].sort((a, b) => {\n if (a.spaceId < b.spaceId) return -1;\n if (a.spaceId > b.spaceId) return 1;\n return 0;\n });\n\n return sorted.map((input) => ({\n spaceId: input.spaceId,\n migrationPackages: planSpace(input),\n }));\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA2CA,SAAgB,gCAAgC,QAA+C;CAa7F,MAAM,EAAE,aAAa,WAAW,GAAG,uBADb,OAAO;CAE7B,MAAM,aAAa,mBAAmB;EACpC,QAAQ,OAAO;EACf,cAAc,OAAO;EACrB,SAAS;EACV,CAAC;CACF,IAAI,eAAe,OAAO,aACxB,MAAM,gCAAgC;EACpC,aAAa,OAAO;EACpB,gBAAgB;EAChB,aAAa,OAAO;EACrB,CAAC;;;;;;;;;;;;;;;;;;;;;;;;;;;ACqBN,eAAsB,+BACpB,QACyC;CACzC,MAAM,EAAE,sBAAsB,SAAS,mBAAmB,4BAA4B;CAEtF,MAAM,uBAAuB,MAAM,yBAAyB,sBAAsB,QAAQ;CAC1F,IAAI,yBAAyB,MAC3B,OAAO,EAAE,MAAM,+BAA+B;CAIhD,MAAM,WAAW,MAAM,kBADN,wBAAwB,sBAAsB,QACd,CAAC;CAClD,MAAM,QAAQ,iBAAiB,SAAS;CAKxC,MAAM,WAAW,qBAAA;CACjB,MAAM,WAAW,IAAI,IACnB,qBAAqB,WAAW,QAAQ,OAAO,CAAC,wBAAwB,SAAS,GAAG,CAAC,CACtF;CAED,MAAM,UAAU,qBAAqB,OAAO,UAAU,qBAAqB,MAAM,EAAE,UAAU,CAAC;CAE9F,IAAI,QAAQ,SAAS,eACnB,OAAO;EAAE,MAAM;EAAe;EAAsB;CAEtD,IAAI,QAAQ,SAAS,iBACnB,OAAO;EACL,MAAM;EACN;EACA,SAAS,QAAQ;EACjB,gBAAgB,QAAQ,eAAe,KAAK,EAAE,SAAS,UAAU;GAAE;GAAS;GAAI,EAAE;EACnF;CAGH,MAAM,iBAAiB,IAAI,IAAI,SAAS,KAAK,QAAQ,CAAC,IAAI,SAAS,eAAe,IAAI,CAAC,CAAC;CAExF,MAAM,UAAkC,EAAE;CAC1C,MAAM,sBAAgC,EAAE;CACxC,MAAM,wCAAwB,IAAI,KAAa;CAC/C,KAAK,MAAM,QAAQ,QAAQ,SAAS,cAAc;EAChD,MAAM,MAAM,eAAe,IAAI,KAAK,cAAc;EAClD,IAAI,CAAC,KAIH,MAAM,IAAI,MACR,sCAAsC,KAAK,cAAc,aAAa,QAAQ,GAC/E;EAEH,oBAAoB,KAAK,IAAI,QAAQ;E
ACrC,KAAK,MAAM,MAAM,IAAI,KAAK,QAAQ,KAAK,GAAG;EAC1C,KAAK,MAAM,aAAa,IAAI,SAAS,oBAAoB,sBAAsB,IAAI,UAAU;;CAG/F,OAAO;EACL,MAAM;EACN;EACA,oBAAoB,CAAC,GAAG,sBAAsB,CAAC,MAAM;EACrD;EACA;EACD;;;;;;;;;;;;;;;;;;;;;;ACpGH,eAAsB,2BACpB,sBACA,SACA,QACe;CACf,mBAAmB,QAAQ;CAE3B,MAAM,MAAM,KAAK,sBAAsB,QAAQ;CAC/C,MAAM,MAAM,KAAK,KAAK,OAAO,EAAE,EAAE,WAAW,MAAM,CAAC;CAEnD,MAAM,UAAU,KAAK,KAAK,gBAAgB,EAAE,GAAG,iBAAiB,OAAO,SAAS,CAAC,IAAI;CACrF,MAAM,UAAU,KAAK,KAAK,gBAAgB,EAAE,OAAO,YAAY;CAE/D,MAAM,mBAAmB,CAAC,GAAG,OAAO,QAAQ,WAAW,CAAC,MAAM;CAC9D,MAAM,WAAW,iBAAiB;EAChC,MAAM,OAAO,QAAQ;EACrB,YAAY;EACb,CAAC;CACF,MAAM,UAAU,KAAK,KAAK,QAAQ,YAAY,EAAE,GAAG,SAAS,IAAI;;;;;;;;;;;;;;;;;;;;;;AC9BlE,eAAsB,6BAA6B,MAQf;CAClC,MAAM,EAAE,sBAAsB,mBAAmB;CAEjD,MAAM,kBAAkB,MAAM,6BAA6B,qBAAqB;CAEhF,MAAM,kCAAkB,IAAI,KAAsC;CAClE,KAAK,MAAM,WAAW,gBAAgB;EACpC,IAAI,YAAY,cAAc;EAC9B,MAAM,OAAO,MAAM,yBAAyB,sBAAsB,QAAQ;EAC1E,IAAI,SAAS,MACX,gBAAgB,IAAI,SAAS,KAAK;;CAItC,OAAO;EAAE;EAAiB;EAAiB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;ACP7C,SAAgB,cACd,QACA,WACsC;CACtC,MAAM,uBAAO,IAAI,KAAa;CAC9B,KAAK,MAAM,SAAS,QAAQ;EAC1B,IAAI,KAAK,IAAI,MAAM,QAAQ,EACzB,MAAM,sBAAsB,MAAM,QAAQ;EAE5C,KAAK,IAAI,MAAM,QAAQ;;CASzB,OANe,CAAC,GAAG,OAAO,CAAC,MAAM,GAAG,MAAM;EACxC,IAAI,EAAE,UAAU,EAAE,SAAS,OAAO;EAClC,IAAI,EAAE,UAAU,EAAE,SAAS,OAAO;EAClC,OAAO;GAGI,CAAC,KAAK,WAAW;EAC5B,SAAS,MAAM;EACf,mBAAmB,UAAU,MAAM;EACpC,EAAE"}
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
//#region src/graph.d.ts
|
|
2
|
+
/**
|
|
3
|
+
* An entry in the migration graph. All on-disk migrations are attested,
|
|
4
|
+
* so `migrationHash` is always a string.
|
|
5
|
+
*/
|
|
6
|
+
interface MigrationEdge {
|
|
7
|
+
readonly from: string;
|
|
8
|
+
readonly to: string;
|
|
9
|
+
readonly migrationHash: string;
|
|
10
|
+
readonly dirName: string;
|
|
11
|
+
readonly createdAt: string;
|
|
12
|
+
readonly labels: readonly string[];
|
|
13
|
+
/**
|
|
14
|
+
* Sorted, deduplicated list of `invariantId`s this edge provides.
|
|
15
|
+
* An empty array means the migration declares no routing-visible
|
|
16
|
+
* data transforms.
|
|
17
|
+
*/
|
|
18
|
+
readonly invariants: readonly string[];
|
|
19
|
+
}
|
|
20
|
+
interface MigrationGraph {
|
|
21
|
+
readonly nodes: ReadonlySet<string>;
|
|
22
|
+
readonly forwardChain: ReadonlyMap<string, readonly MigrationEdge[]>;
|
|
23
|
+
readonly reverseChain: ReadonlyMap<string, readonly MigrationEdge[]>;
|
|
24
|
+
readonly migrationByHash: ReadonlyMap<string, MigrationEdge>;
|
|
25
|
+
}
|
|
26
|
+
//#endregion
|
|
27
|
+
export { MigrationGraph as n, MigrationEdge as t };
|
|
28
|
+
//# sourceMappingURL=graph-HMWAldoR.d.mts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"graph-HMWAldoR.d.mts","names":[],"sources":["../src/graph.ts"],"mappings":";;AAIA;;;UAAiB,aAAA;EAAA,SACN,IAAA;EAAA,SACA,EAAA;EAAA,SACA,aAAA;EAAA,SACA,OAAA;EAAA,SACA,SAAA;EAAA,SACA,MAAA;EAMA;;;AAGX;;EAHW,SAAA,UAAA;AAAA;AAAA,UAGM,cAAA;EAAA,SACN,KAAA,EAAO,WAAA;EAAA,SACP,YAAA,EAAc,WAAA,kBAA6B,aAAA;EAAA,SAC3C,YAAA,EAAc,WAAA,kBAA6B,aAAA;EAAA,SAC3C,eAAA,EAAiB,WAAA,SAAoB,aAAA;AAAA"}
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
import { createHash } from "node:crypto";
|
|
2
|
+
//#region src/canonicalize-json.ts
|
|
3
|
+
function sortKeys(value) {
|
|
4
|
+
if (value === null || typeof value !== "object") return value;
|
|
5
|
+
if (Array.isArray(value)) return value.map(sortKeys);
|
|
6
|
+
const sorted = {};
|
|
7
|
+
for (const key of Object.keys(value).sort()) sorted[key] = sortKeys(value[key]);
|
|
8
|
+
return sorted;
|
|
9
|
+
}
|
|
10
|
+
function canonicalizeJson(value) {
|
|
11
|
+
return JSON.stringify(sortKeys(value));
|
|
12
|
+
}
|
|
13
|
+
//#endregion
|
|
14
|
+
//#region src/hash.ts
|
|
15
|
+
function sha256Hex(input) {
|
|
16
|
+
return createHash("sha256").update(input).digest("hex");
|
|
17
|
+
}
|
|
18
|
+
/**
|
|
19
|
+
* Content-addressed migration hash over (metadata envelope sans
|
|
20
|
+
* contracts/hints/signature, ops). See ADR 199 — Storage-only migration
|
|
21
|
+
* identity for the rationale: contracts are anchored separately by the
|
|
22
|
+
* storage-hash bookends inside the envelope; planner hints are advisory
|
|
23
|
+
* and must not affect identity.
|
|
24
|
+
*
|
|
25
|
+
* The integrity check is purely structural, not semantic. The function
|
|
26
|
+
* canonicalizes its inputs via `sortKeys` (recursive) + `JSON.stringify`
|
|
27
|
+
* and hashes the result. Target-specific operation payloads (`step.sql`,
|
|
28
|
+
* Mongo's pipeline AST, …) are hashed verbatim — no per-target
|
|
29
|
+
* normalization is required, because what's being verified is "do the
|
|
30
|
+
* on-disk bytes still produce their recorded hash", not "do two
|
|
31
|
+
* semantically-equivalent migrations hash the same". The latter is an
|
|
32
|
+
* emit-drift concern (ADR 192 step 2).
|
|
33
|
+
*
|
|
34
|
+
* The symmetry across write and read holds because `JSON.parse(
|
|
35
|
+
* JSON.stringify(x))` round-trips JSON-safe values losslessly and
|
|
36
|
+
* `sortKeys` is idempotent and deterministic — write-time and read-time
|
|
37
|
+
* canonicalization produce the same canonical bytes regardless of
|
|
38
|
+
* source-side key ordering or whitespace.
|
|
39
|
+
*
|
|
40
|
+
* The `migrationHash` field on the metadata is stripped before hashing
|
|
41
|
+
* so the function can be used both at write time (when no hash exists
|
|
42
|
+
* yet) and at verify time (rehashing an already-attested record).
|
|
43
|
+
*/
|
|
44
|
+
function computeMigrationHash(metadata, ops) {
|
|
45
|
+
const { migrationHash: _migrationHash, signature: _signature, fromContract: _fromContract, toContract: _toContract, hints: _hints, ...strippedMeta } = metadata;
|
|
46
|
+
return `sha256:${sha256Hex(canonicalizeJson([canonicalizeJson(strippedMeta), canonicalizeJson(ops)].map(sha256Hex)))}`;
|
|
47
|
+
}
|
|
48
|
+
/**
|
|
49
|
+
* Re-hash an in-memory migration package and compare against the stored
|
|
50
|
+
* `migrationHash`. See `computeMigrationHash` for the canonicalization rules.
|
|
51
|
+
*
|
|
52
|
+
* Returns `{ ok: true }` when the package is internally consistent, or
|
|
53
|
+
* `{ ok: false, reason: 'mismatch', storedHash, computedHash }` when it is
|
|
54
|
+
* not — typically a sign of FS corruption, partial writes, or a post-emit
|
|
55
|
+
* hand edit.
|
|
56
|
+
*/
|
|
57
|
+
function verifyMigrationHash(pkg) {
|
|
58
|
+
const computed = computeMigrationHash(pkg.metadata, pkg.ops);
|
|
59
|
+
if (pkg.metadata.migrationHash === computed) return {
|
|
60
|
+
ok: true,
|
|
61
|
+
storedHash: pkg.metadata.migrationHash,
|
|
62
|
+
computedHash: computed
|
|
63
|
+
};
|
|
64
|
+
return {
|
|
65
|
+
ok: false,
|
|
66
|
+
reason: "mismatch",
|
|
67
|
+
storedHash: pkg.metadata.migrationHash,
|
|
68
|
+
computedHash: computed
|
|
69
|
+
};
|
|
70
|
+
}
|
|
71
|
+
//#endregion
|
|
72
|
+
export { verifyMigrationHash as n, canonicalizeJson as r, computeMigrationHash as t };
|
|
73
|
+
|
|
74
|
+
//# sourceMappingURL=hash-By50zM_E.mjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"hash-By50zM_E.mjs","names":[],"sources":["../src/canonicalize-json.ts","../src/hash.ts"],"sourcesContent":["function sortKeys(value: unknown): unknown {\n if (value === null || typeof value !== 'object') {\n return value;\n }\n if (Array.isArray(value)) {\n return value.map(sortKeys);\n }\n const sorted: Record<string, unknown> = {};\n for (const key of Object.keys(value).sort()) {\n sorted[key] = sortKeys((value as Record<string, unknown>)[key]);\n }\n return sorted;\n}\n\nexport function canonicalizeJson(value: unknown): string {\n return JSON.stringify(sortKeys(value));\n}\n","import { createHash } from 'node:crypto';\nimport { canonicalizeJson } from './canonicalize-json';\nimport type { MigrationMetadata } from './metadata';\nimport type { MigrationOps, OnDiskMigrationPackage } from './package';\n\nexport interface VerifyResult {\n readonly ok: boolean;\n readonly reason?: 'mismatch';\n readonly storedHash: string;\n readonly computedHash: string;\n}\n\nfunction sha256Hex(input: string): string {\n return createHash('sha256').update(input).digest('hex');\n}\n\n/**\n * Content-addressed migration hash over (metadata envelope sans\n * contracts/hints/signature, ops). See ADR 199 — Storage-only migration\n * identity for the rationale: contracts are anchored separately by the\n * storage-hash bookends inside the envelope; planner hints are advisory\n * and must not affect identity.\n *\n * The integrity check is purely structural, not semantic. The function\n * canonicalizes its inputs via `sortKeys` (recursive) + `JSON.stringify`\n * and hashes the result. Target-specific operation payloads (`step.sql`,\n * Mongo's pipeline AST, …) are hashed verbatim — no per-target\n * normalization is required, because what's being verified is \"do the\n * on-disk bytes still produce their recorded hash\", not \"do two\n * semantically-equivalent migrations hash the same\". 
The latter is an\n * emit-drift concern (ADR 192 step 2).\n *\n * The symmetry across write and read holds because `JSON.parse(\n * JSON.stringify(x))` round-trips JSON-safe values losslessly and\n * `sortKeys` is idempotent and deterministic — write-time and read-time\n * canonicalization produce the same canonical bytes regardless of\n * source-side key ordering or whitespace.\n *\n * The `migrationHash` field on the metadata is stripped before hashing\n * so the function can be used both at write time (when no hash exists\n * yet) and at verify time (rehashing an already-attested record).\n */\nexport function computeMigrationHash(\n metadata: Omit<MigrationMetadata, 'migrationHash'> & { readonly migrationHash?: string },\n ops: MigrationOps,\n): string {\n const {\n migrationHash: _migrationHash,\n signature: _signature,\n fromContract: _fromContract,\n toContract: _toContract,\n hints: _hints,\n ...strippedMeta\n } = metadata;\n\n const canonicalMetadata = canonicalizeJson(strippedMeta);\n const canonicalOps = canonicalizeJson(ops);\n\n const partHashes = [canonicalMetadata, canonicalOps].map(sha256Hex);\n const hash = sha256Hex(canonicalizeJson(partHashes));\n\n return `sha256:${hash}`;\n}\n\n/**\n * Re-hash an in-memory migration package and compare against the stored\n * `migrationHash`. 
See `computeMigrationHash` for the canonicalization rules.\n *\n * Returns `{ ok: true }` when the package is internally consistent, or\n * `{ ok: false, reason: 'mismatch', storedHash, computedHash }` when it is\n * not — typically a sign of FS corruption, partial writes, or a post-emit\n * hand edit.\n */\nexport function verifyMigrationHash(pkg: OnDiskMigrationPackage): VerifyResult {\n const computed = computeMigrationHash(pkg.metadata, pkg.ops);\n\n if (pkg.metadata.migrationHash === computed) {\n return {\n ok: true,\n storedHash: pkg.metadata.migrationHash,\n computedHash: computed,\n };\n }\n\n return {\n ok: false,\n reason: 'mismatch',\n storedHash: pkg.metadata.migrationHash,\n computedHash: computed,\n };\n}\n"],"mappings":";;AAAA,SAAS,SAAS,OAAyB;CACzC,IAAI,UAAU,QAAQ,OAAO,UAAU,UACrC,OAAO;CAET,IAAI,MAAM,QAAQ,MAAM,EACtB,OAAO,MAAM,IAAI,SAAS;CAE5B,MAAM,SAAkC,EAAE;CAC1C,KAAK,MAAM,OAAO,OAAO,KAAK,MAAM,CAAC,MAAM,EACzC,OAAO,OAAO,SAAU,MAAkC,KAAK;CAEjE,OAAO;;AAGT,SAAgB,iBAAiB,OAAwB;CACvD,OAAO,KAAK,UAAU,SAAS,MAAM,CAAC;;;;ACHxC,SAAS,UAAU,OAAuB;CACxC,OAAO,WAAW,SAAS,CAAC,OAAO,MAAM,CAAC,OAAO,MAAM;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA6BzD,SAAgB,qBACd,UACA,KACQ;CACR,MAAM,EACJ,eAAe,gBACf,WAAW,YACX,cAAc,eACd,YAAY,aACZ,OAAO,QACP,GAAG,iBACD;CAQJ,OAAO,UAFM,UAAU,iBADJ,CAHO,iBAAiB,aAGN,EAFhB,iBAAiB,IAEa,CAAC,CAAC,IAAI,UACP,CAAC,CAE9B;;;;;;;;;;;AAYvB,SAAgB,oBAAoB,KAA2C;CAC7E,MAAM,WAAW,qBAAqB,IAAI,UAAU,IAAI,IAAI;CAE5D,IAAI,IAAI,SAAS,kBAAkB,UACjC,OAAO;EACL,IAAI;EACJ,YAAY,IAAI,SAAS;EACzB,cAAc;EACf;CAGH,OAAO;EACL,IAAI;EACJ,QAAQ;EACR,YAAY,IAAI,SAAS;EACzB,cAAc;EACf"}
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
import { a as errorDuplicateInvariantInEdge, l as errorInvalidInvariantId } from "./errors-EPL_9p9f.mjs";
|
|
2
|
+
//#region src/invariants.ts
|
|
3
|
+
/**
|
|
4
|
+
* Hygiene check for `invariantId`. Rejects empty values plus any
|
|
5
|
+
* whitespace or control character (including Unicode whitespace like
|
|
6
|
+
* NBSP and em space, which are visually identical to ASCII space and
|
|
7
|
+
* routinely sneak in via paste).
|
|
8
|
+
*/
|
|
9
|
+
function validateInvariantId(invariantId) {
|
|
10
|
+
if (invariantId.length === 0) return false;
|
|
11
|
+
return !/[\p{Cc}\p{White_Space}]/u.test(invariantId);
|
|
12
|
+
}
|
|
13
|
+
/**
|
|
14
|
+
* Walk a migration's operations and produce its `providedInvariants`
|
|
15
|
+
* aggregate: the sorted, deduplicated list of `invariantId`s declared
|
|
16
|
+
* by ops in the migration. Ops without an `invariantId` are skipped.
|
|
17
|
+
*
|
|
18
|
+
* Both `data`-class ops (data-transforms, e.g. backfills) and
|
|
19
|
+
* `additive`-class opaque DDL (e.g. cipherstash's vendored EQL bundle
|
|
20
|
+
* via `installEqlBundleOp`) may declare invariantIds: the
|
|
21
|
+
* `operationClass` axis classifies *policy gating* (which kinds of ops
|
|
22
|
+
* a `db init` / `db update` policy permits), while `invariantId`
|
|
23
|
+
* classifies *marker bookkeeping* (which named bundles of work a
|
|
24
|
+
* future regeneration knows to skip). The two concerns are
|
|
25
|
+
* intentionally orthogonal — an extension can ship additive
|
|
26
|
+
* non-IR-derivable DDL (the only way the planner can know the bundle
|
|
27
|
+
* is already applied is via the invariantId on the marker) without
|
|
28
|
+
* needing to mis-classify it as `data`-class.
|
|
29
|
+
*
|
|
30
|
+
* Throws `MIGRATION.INVALID_INVARIANT_ID` on a malformed id and
|
|
31
|
+
* `MIGRATION.DUPLICATE_INVARIANT_IN_EDGE` on duplicates.
|
|
32
|
+
*/
|
|
33
|
+
function deriveProvidedInvariants(ops) {
|
|
34
|
+
const seen = /* @__PURE__ */ new Set();
|
|
35
|
+
for (const op of ops) {
|
|
36
|
+
const invariantId = readInvariantId(op);
|
|
37
|
+
if (invariantId === void 0) continue;
|
|
38
|
+
if (!validateInvariantId(invariantId)) throw errorInvalidInvariantId(invariantId);
|
|
39
|
+
if (seen.has(invariantId)) throw errorDuplicateInvariantInEdge(invariantId);
|
|
40
|
+
seen.add(invariantId);
|
|
41
|
+
}
|
|
42
|
+
return [...seen].sort();
|
|
43
|
+
}
|
|
44
|
+
function readInvariantId(op) {
|
|
45
|
+
if (!Object.hasOwn(op, "invariantId")) return void 0;
|
|
46
|
+
const candidate = op.invariantId;
|
|
47
|
+
return typeof candidate === "string" ? candidate : void 0;
|
|
48
|
+
}
|
|
49
|
+
//#endregion
|
|
50
|
+
export { validateInvariantId as n, deriveProvidedInvariants as t };
|
|
51
|
+
|
|
52
|
+
//# sourceMappingURL=invariants-Duc8f9NM.mjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"invariants-Duc8f9NM.mjs","names":[],"sources":["../src/invariants.ts"],"sourcesContent":["import type { MigrationPlanOperation } from '@prisma-next/framework-components/control';\nimport { errorDuplicateInvariantInEdge, errorInvalidInvariantId } from './errors';\nimport type { MigrationOps } from './package';\n\n/**\n * Hygiene check for `invariantId`. Rejects empty values plus any\n * whitespace or control character (including Unicode whitespace like\n * NBSP and em space, which are visually identical to ASCII space and\n * routinely sneak in via paste).\n */\nexport function validateInvariantId(invariantId: string): boolean {\n if (invariantId.length === 0) return false;\n return !/[\\p{Cc}\\p{White_Space}]/u.test(invariantId);\n}\n\n/**\n * Walk a migration's operations and produce its `providedInvariants`\n * aggregate: the sorted, deduplicated list of `invariantId`s declared\n * by ops in the migration. Ops without an `invariantId` are skipped.\n *\n * Both `data`-class ops (data-transforms, e.g. backfills) and\n * `additive`-class opaque DDL (e.g. cipherstash's vendored EQL bundle\n * via `installEqlBundleOp`) may declare invariantIds: the\n * `operationClass` axis classifies *policy gating* (which kinds of ops\n * a `db init` / `db update` policy permits), while `invariantId`\n * classifies *marker bookkeeping* (which named bundles of work a\n * future regeneration knows to skip). 
The two concerns are\n * intentionally orthogonal — an extension can ship additive\n * non-IR-derivable DDL (the only way the planner can know the bundle\n * is already applied is via the invariantId on the marker) without\n * needing to mis-classify it as `data`-class.\n *\n * Throws `MIGRATION.INVALID_INVARIANT_ID` on a malformed id and\n * `MIGRATION.DUPLICATE_INVARIANT_IN_EDGE` on duplicates.\n */\nexport function deriveProvidedInvariants(ops: MigrationOps): readonly string[] {\n const seen = new Set<string>();\n for (const op of ops) {\n const invariantId = readInvariantId(op);\n if (invariantId === undefined) continue;\n if (!validateInvariantId(invariantId)) {\n throw errorInvalidInvariantId(invariantId);\n }\n if (seen.has(invariantId)) {\n throw errorDuplicateInvariantInEdge(invariantId);\n }\n seen.add(invariantId);\n }\n return [...seen].sort();\n}\n\nfunction readInvariantId(op: MigrationPlanOperation): string | undefined {\n if (!Object.hasOwn(op, 'invariantId')) return undefined;\n const candidate = (op as { invariantId?: unknown }).invariantId;\n return typeof candidate === 'string' ? candidate : undefined;\n}\n"],"mappings":";;;;;;;;AAUA,SAAgB,oBAAoB,aAA8B;CAChE,IAAI,YAAY,WAAW,GAAG,OAAO;CACrC,OAAO,CAAC,2BAA2B,KAAK,YAAY;;;;;;;;;;;;;;;;;;;;;;AAuBtD,SAAgB,yBAAyB,KAAsC;CAC7E,MAAM,uBAAO,IAAI,KAAa;CAC9B,KAAK,MAAM,MAAM,KAAK;EACpB,MAAM,cAAc,gBAAgB,GAAG;EACvC,IAAI,gBAAgB,KAAA,GAAW;EAC/B,IAAI,CAAC,oBAAoB,YAAY,EACnC,MAAM,wBAAwB,YAAY;EAE5C,IAAI,KAAK,IAAI,YAAY,EACvB,MAAM,8BAA8B,YAAY;EAElD,KAAK,IAAI,YAAY;;CAEvB,OAAO,CAAC,GAAG,KAAK,CAAC,MAAM;;AAGzB,SAAS,gBAAgB,IAAgD;CACvE,IAAI,CAAC,OAAO,OAAO,IAAI,cAAc,EAAE,OAAO,KAAA;CAC9C,MAAM,YAAa,GAAiC;CACpD,OAAO,OAAO,cAAc,WAAW,YAAY,KAAA"}
|