@prisma-next/migration-tools 0.5.0-dev.9 → 0.6.0-dev.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. package/README.md +34 -22
  2. package/dist/{constants-BRi0X7B_.mjs → constants-DWV9_o2Z.mjs} +2 -2
  3. package/dist/{constants-BRi0X7B_.mjs.map → constants-DWV9_o2Z.mjs.map} +1 -1
  4. package/dist/errors-EPL_9p9f.mjs +297 -0
  5. package/dist/errors-EPL_9p9f.mjs.map +1 -0
  6. package/dist/exports/aggregate.d.mts +614 -0
  7. package/dist/exports/aggregate.d.mts.map +1 -0
  8. package/dist/exports/aggregate.mjs +611 -0
  9. package/dist/exports/aggregate.mjs.map +1 -0
  10. package/dist/exports/constants.d.mts.map +1 -1
  11. package/dist/exports/constants.mjs +2 -3
  12. package/dist/exports/errors.d.mts +68 -0
  13. package/dist/exports/errors.d.mts.map +1 -0
  14. package/dist/exports/errors.mjs +2 -0
  15. package/dist/exports/graph.d.mts +2 -0
  16. package/dist/exports/graph.mjs +1 -0
  17. package/dist/exports/hash.d.mts +52 -0
  18. package/dist/exports/hash.d.mts.map +1 -0
  19. package/dist/exports/hash.mjs +2 -0
  20. package/dist/exports/invariants.d.mts +39 -0
  21. package/dist/exports/invariants.d.mts.map +1 -0
  22. package/dist/exports/invariants.mjs +2 -0
  23. package/dist/exports/io.d.mts +66 -6
  24. package/dist/exports/io.d.mts.map +1 -1
  25. package/dist/exports/io.mjs +2 -3
  26. package/dist/exports/metadata.d.mts +2 -0
  27. package/dist/exports/metadata.mjs +1 -0
  28. package/dist/exports/migration-graph.d.mts +2 -0
  29. package/dist/exports/migration-graph.mjs +2 -0
  30. package/dist/exports/migration-ts.d.mts.map +1 -1
  31. package/dist/exports/migration-ts.mjs +2 -4
  32. package/dist/exports/migration-ts.mjs.map +1 -1
  33. package/dist/exports/migration.d.mts +15 -14
  34. package/dist/exports/migration.d.mts.map +1 -1
  35. package/dist/exports/migration.mjs +70 -43
  36. package/dist/exports/migration.mjs.map +1 -1
  37. package/dist/exports/package.d.mts +3 -0
  38. package/dist/exports/package.mjs +1 -0
  39. package/dist/exports/refs.d.mts.map +1 -1
  40. package/dist/exports/refs.mjs +3 -4
  41. package/dist/exports/refs.mjs.map +1 -1
  42. package/dist/exports/spaces.d.mts +591 -0
  43. package/dist/exports/spaces.d.mts.map +1 -0
  44. package/dist/exports/spaces.mjs +266 -0
  45. package/dist/exports/spaces.mjs.map +1 -0
  46. package/dist/graph-HMWAldoR.d.mts +28 -0
  47. package/dist/graph-HMWAldoR.d.mts.map +1 -0
  48. package/dist/hash-By50zM_E.mjs +74 -0
  49. package/dist/hash-By50zM_E.mjs.map +1 -0
  50. package/dist/invariants-qgQGlsrV.mjs +57 -0
  51. package/dist/invariants-qgQGlsrV.mjs.map +1 -0
  52. package/dist/io-D5YYptRO.mjs +239 -0
  53. package/dist/io-D5YYptRO.mjs.map +1 -0
  54. package/dist/metadata-CFvm3ayn.d.mts +2 -0
  55. package/dist/migration-graph-DGNnKDY5.mjs +523 -0
  56. package/dist/migration-graph-DGNnKDY5.mjs.map +1 -0
  57. package/dist/migration-graph-DulOITvG.d.mts +124 -0
  58. package/dist/migration-graph-DulOITvG.d.mts.map +1 -0
  59. package/dist/op-schema-D5qkXfEf.mjs +13 -0
  60. package/dist/op-schema-D5qkXfEf.mjs.map +1 -0
  61. package/dist/package-BjiZ7KDy.d.mts +21 -0
  62. package/dist/package-BjiZ7KDy.d.mts.map +1 -0
  63. package/dist/read-contract-space-contract-Bj_EMYSC.mjs +298 -0
  64. package/dist/read-contract-space-contract-Bj_EMYSC.mjs.map +1 -0
  65. package/package.json +42 -17
  66. package/src/aggregate/loader.ts +409 -0
  67. package/src/aggregate/marker-types.ts +16 -0
  68. package/src/aggregate/planner-types.ts +171 -0
  69. package/src/aggregate/planner.ts +158 -0
  70. package/src/aggregate/project-schema-to-space.ts +64 -0
  71. package/src/aggregate/strategies/graph-walk.ts +118 -0
  72. package/src/aggregate/strategies/synth.ts +122 -0
  73. package/src/aggregate/types.ts +89 -0
  74. package/src/aggregate/verifier.ts +230 -0
  75. package/src/assert-descriptor-self-consistency.ts +70 -0
  76. package/src/compute-extension-space-apply-path.ts +152 -0
  77. package/src/concatenate-space-apply-inputs.ts +90 -0
  78. package/src/contract-space-from-json.ts +63 -0
  79. package/src/detect-space-contract-drift.ts +91 -0
  80. package/src/emit-contract-space-artefacts.ts +70 -0
  81. package/src/errors.ts +251 -17
  82. package/src/exports/aggregate.ts +42 -0
  83. package/src/exports/errors.ts +8 -0
  84. package/src/exports/graph.ts +1 -0
  85. package/src/exports/hash.ts +2 -0
  86. package/src/exports/invariants.ts +1 -0
  87. package/src/exports/io.ts +3 -1
  88. package/src/exports/metadata.ts +1 -0
  89. package/src/exports/{dag.ts → migration-graph.ts} +3 -2
  90. package/src/exports/migration.ts +0 -1
  91. package/src/exports/package.ts +2 -0
  92. package/src/exports/spaces.ts +50 -0
  93. package/src/gather-disk-contract-space-state.ts +62 -0
  94. package/src/graph-ops.ts +57 -30
  95. package/src/graph.ts +25 -0
  96. package/src/hash.ts +91 -0
  97. package/src/invariants.ts +61 -0
  98. package/src/io.ts +163 -40
  99. package/src/metadata.ts +1 -0
  100. package/src/migration-base.ts +97 -56
  101. package/src/migration-graph.ts +676 -0
  102. package/src/op-schema.ts +11 -0
  103. package/src/package.ts +21 -0
  104. package/src/plan-all-spaces.ts +76 -0
  105. package/src/read-contract-space-contract.ts +44 -0
  106. package/src/read-contract-space-head-ref.ts +63 -0
  107. package/src/space-layout.ts +48 -0
  108. package/src/verify-contract-spaces.ts +272 -0
  109. package/dist/attestation-BnzTb0Qp.mjs +0 -65
  110. package/dist/attestation-BnzTb0Qp.mjs.map +0 -1
  111. package/dist/errors-BmiSgz1j.mjs +0 -160
  112. package/dist/errors-BmiSgz1j.mjs.map +0 -1
  113. package/dist/exports/attestation.d.mts +0 -37
  114. package/dist/exports/attestation.d.mts.map +0 -1
  115. package/dist/exports/attestation.mjs +0 -4
  116. package/dist/exports/dag.d.mts +0 -51
  117. package/dist/exports/dag.d.mts.map +0 -1
  118. package/dist/exports/dag.mjs +0 -386
  119. package/dist/exports/dag.mjs.map +0 -1
  120. package/dist/exports/types.d.mts +0 -35
  121. package/dist/exports/types.d.mts.map +0 -1
  122. package/dist/exports/types.mjs +0 -3
  123. package/dist/io-Cd6GLyjK.mjs +0 -153
  124. package/dist/io-Cd6GLyjK.mjs.map +0 -1
  125. package/dist/types-DyGXcWWp.d.mts +0 -71
  126. package/dist/types-DyGXcWWp.d.mts.map +0 -1
  127. package/src/attestation.ts +0 -81
  128. package/src/dag.ts +0 -426
  129. package/src/exports/attestation.ts +0 -2
  130. package/src/exports/types.ts +0 -10
  131. package/src/types.ts +0 -66
@@ -0,0 +1,62 @@
1
+ import { readContractSpaceHeadRef } from './read-contract-space-head-ref';
2
+ import { APP_SPACE_ID } from './space-layout';
3
+ import {
4
+ type ContractSpaceHeadRecord,
5
+ listContractSpaceDirectories,
6
+ } from './verify-contract-spaces';
7
+
8
+ /**
9
+ * Disk-side inputs to {@link import('./verify-contract-spaces').verifyContractSpaces}
10
+ * — gathered without touching the live database. The caller composes
11
+ * this with the marker rows it reads from the runtime to invoke the
12
+ * verifier.
13
+ */
14
+ export interface DiskContractSpaceState {
15
+ /** Contract-space directory names observed under `<projectMigrationsDir>/`. */
16
+ readonly spaceDirsOnDisk: readonly string[];
17
+ /** Head-ref `(hash, invariants)` per extension space. */
18
+ readonly headRefsBySpace: ReadonlyMap<string, ContractSpaceHeadRecord>;
19
+ }
20
+
21
+ /**
22
+ * Read the on-disk state the per-space verifier needs:
23
+ *
24
+ * - The list of contract-space directories under
25
+ * `<projectMigrationsDir>/` (via
26
+ * {@link import('./verify-contract-spaces').listContractSpaceDirectories}).
27
+ * - The on-disk head ref `(hash, invariants)` for each declared extension space
28
+ * (via {@link readContractSpaceHeadRef}; missing on-disk artefacts are simply
29
+ * omitted — the verifier reports them as `declaredButUnmigrated`).
30
+ *
31
+ * Synchronous in spirit but async due to filesystem reads. Reads only
32
+ * the user's repo. **Does not import any extension descriptor module.**
33
+ *
34
+ * Composition convention: pure target-agnostic primitive in
35
+ * `1-framework`; the SQL family (and any future target family) wires
36
+ * it into its `dbInit` / `verify` flows alongside its own marker-row
37
+ * read before invoking `verifyContractSpaces`.
38
+ */
39
+ export async function gatherDiskContractSpaceState(args: {
40
+ readonly projectMigrationsDir: string;
41
+ /**
42
+ * Set of space ids the project declares: `'app'` plus each entry in
43
+ * `extensionPacks` whose descriptor exposes a `contractSpace`. The
44
+ * helper reads on-disk head data only for the extension members.
45
+ */
46
+ readonly loadedSpaceIds: ReadonlySet<string>;
47
+ }): Promise<DiskContractSpaceState> {
48
+ const { projectMigrationsDir, loadedSpaceIds } = args;
49
+
50
+ const spaceDirsOnDisk = await listContractSpaceDirectories(projectMigrationsDir);
51
+
52
+ const headRefsBySpace = new Map<string, ContractSpaceHeadRecord>();
53
+ for (const spaceId of loadedSpaceIds) {
54
+ if (spaceId === APP_SPACE_ID) continue;
55
+ const head = await readContractSpaceHeadRef(projectMigrationsDir, spaceId);
56
+ if (head !== null) {
57
+ headRefsBySpace.set(spaceId, head);
58
+ }
59
+ }
60
+
61
+ return { spaceDirsOnDisk, headRefsBySpace };
62
+ }
package/src/graph-ops.ts CHANGED
@@ -3,13 +3,18 @@ import { Queue } from './queue';
3
3
  /**
4
4
  * One step of a BFS traversal.
5
5
  *
6
- * `parent` and `incomingEdge` are `null` for start nodes — they were not
7
- * reached via any edge. For every other node they record the node and edge
8
- * by which this node was first reached.
6
+ * `parent` and `incomingEdge` are `null` for start states — they were not
7
+ * reached via any edge. For every other state they record the predecessor
8
+ * state and the edge by which this state was first reached.
9
+ *
10
+ * `state` is the BFS state, most often a string (graph node identifier) but
11
+ * can be a composite object. The string overload keeps the common case
12
+ * ergonomic; the generic overload accepts a caller-supplied `key` function
13
+ * that produces a stable equality key for dedup.
9
14
  */
10
- export interface BfsStep<E> {
11
- readonly node: string;
12
- readonly parent: string | null;
15
+ export interface BfsStep<S, E> {
16
+ readonly state: S;
17
+ readonly parent: S | null;
13
18
  readonly incomingEdge: E | null;
14
19
  }
15
20
 
@@ -17,48 +22,70 @@ export interface BfsStep<E> {
17
22
  * Generic breadth-first traversal.
18
23
  *
19
24
  * Direction (forward/reverse) is expressed by the caller's `neighbours`
20
- * closure: return `{ next, edge }` pairs where `next` is the node to visit
25
+ * closure: return `{ next, edge }` pairs where `next` is the state to visit
21
26
  * next and `edge` is the edge that connects them. Callers that don't need
22
27
  * path reconstruction can ignore the `parent`/`incomingEdge` fields of each
23
28
  * yielded step.
24
29
  *
30
+ * Ordering — when the result needs to be deterministic (path-finding) the
31
+ * caller is responsible for sorting inside `neighbours`; this generator
32
+ * does not impose an ordering hook of its own. State-dependent orderings
33
+ * have full access to the source state inside the closure.
34
+ *
25
35
  * Stops are intrinsic — callers `break` out of the `for..of` loop when
26
36
  * they've found what they're looking for.
27
- *
28
- * `ordering`, if provided, controls the order in which neighbours of each
29
- * node are enqueued. Only matters for path-finding: a deterministic ordering
30
- * makes BFS return a deterministic shortest path when multiple exist.
31
37
  */
32
- export function* bfs<E>(
38
+ export function bfs<E>(
33
39
  starts: Iterable<string>,
34
- neighbours: (node: string) => Iterable<{ next: string; edge: E }>,
35
- ordering?: (items: readonly { next: string; edge: E }[]) => readonly { next: string; edge: E }[],
36
- ): Generator<BfsStep<E>> {
40
+ neighbours: (state: string) => Iterable<{ next: string; edge: E }>,
41
+ ): Generator<BfsStep<string, E>>;
42
+ export function bfs<S, E>(
43
+ starts: Iterable<S>,
44
+ neighbours: (state: S) => Iterable<{ next: S; edge: E }>,
45
+ key: (state: S) => string,
46
+ ): Generator<BfsStep<S, E>>;
47
+ export function* bfs<S, E>(
48
+ starts: Iterable<S>,
49
+ neighbours: (state: S) => Iterable<{ next: S; edge: E }>,
50
+ // Identity default for the string overload. TypeScript can't express
51
+ // "default applies only when S = string", so this cast bridges the
52
+ // generic implementation signature to the public overloads — which
53
+ // guarantee `key` is omitted only when S = string at the call site.
54
+ key: (state: S) => string = (state) => state as unknown as string,
55
+ ): Generator<BfsStep<S, E>> {
56
+ // Queue entries carry the state alongside its key so we don't recompute
57
+ // key() twice per visit (once on dedup, once on parent lookup). Composite
58
+ // keys can be non-trivial to compute; string-overload callers pay nothing
59
+ // since key() is identity there.
60
+ interface Entry {
61
+ readonly state: S;
62
+ readonly key: string;
63
+ }
37
64
  const visited = new Set<string>();
38
- const parentMap = new Map<string, { parent: string; edge: E }>();
39
- const queue = new Queue<string>();
65
+ const parentMap = new Map<string, { parent: S; edge: E }>();
66
+ const queue = new Queue<Entry>();
40
67
  for (const start of starts) {
41
- if (!visited.has(start)) {
42
- visited.add(start);
43
- queue.push(start);
68
+ const k = key(start);
69
+ if (!visited.has(k)) {
70
+ visited.add(k);
71
+ queue.push({ state: start, key: k });
44
72
  }
45
73
  }
46
74
  while (!queue.isEmpty) {
47
- const current = queue.shift();
48
- const parentInfo = parentMap.get(current);
75
+ const { state: current, key: curKey } = queue.shift();
76
+ const parentInfo = parentMap.get(curKey);
49
77
  yield {
50
- node: current,
78
+ state: current,
51
79
  parent: parentInfo?.parent ?? null,
52
80
  incomingEdge: parentInfo?.edge ?? null,
53
81
  };
54
82
 
55
- const items = neighbours(current);
56
- const toVisit = ordering ? ordering([...items]) : items;
57
- for (const { next, edge } of toVisit) {
58
- if (!visited.has(next)) {
59
- visited.add(next);
60
- parentMap.set(next, { parent: current, edge });
61
- queue.push(next);
83
+ for (const { next, edge } of neighbours(current)) {
84
+ const k = key(next);
85
+ if (!visited.has(k)) {
86
+ visited.add(k);
87
+ parentMap.set(k, { parent: current, edge });
88
+ queue.push({ state: next, key: k });
62
89
  }
63
90
  }
64
91
  }
package/src/graph.ts ADDED
@@ -0,0 +1,25 @@
1
/**
 * An entry in the migration graph. All on-disk migrations are attested,
 * so `migrationHash` is always a string.
 */
export interface MigrationEdge {
  /** Node this edge starts from. */
  readonly from: string;
  /** Node this edge leads to. */
  readonly to: string;
  /** Content-addressed hash identifying this migration. */
  readonly migrationHash: string;
  /** On-disk directory name of the migration package. */
  readonly dirName: string;
  // NOTE(review): timestamp format (ISO 8601?) is not visible here — confirm at the producer.
  readonly createdAt: string;
  /** Labels attached to the migration. */
  readonly labels: readonly string[];
  /**
   * Sorted, deduplicated list of `invariantId`s this edge provides.
   * An empty array means the migration declares no routing-visible
   * data transforms.
   */
  readonly invariants: readonly string[];
}

/**
 * Immutable adjacency view over a set of migration edges.
 */
export interface MigrationGraph {
  /** Every node identifier appearing in the graph. */
  readonly nodes: ReadonlySet<string>;
  // NOTE(review): presumed keyed by `from` — confirm in the graph builder.
  readonly forwardChain: ReadonlyMap<string, readonly MigrationEdge[]>;
  // NOTE(review): presumed keyed by `to` — confirm in the graph builder.
  readonly reverseChain: ReadonlyMap<string, readonly MigrationEdge[]>;
  /** Direct lookup of an edge by its `migrationHash`. */
  readonly migrationByHash: ReadonlyMap<string, MigrationEdge>;
}
package/src/hash.ts ADDED
@@ -0,0 +1,91 @@
1
+ import { createHash } from 'node:crypto';
2
+ import { canonicalizeJson } from './canonicalize-json';
3
+ import type { MigrationMetadata } from './metadata';
4
+ import type { MigrationOps, OnDiskMigrationPackage } from './package';
5
+
6
/**
 * Outcome of re-hashing a migration package against its stored hash
 * (see `verifyMigrationHash`). Both hashes are always populated so
 * callers can report the discrepancy; `reason` appears only on failure.
 */
export interface VerifyResult {
  readonly ok: boolean;
  /** Present only when `ok` is false. */
  readonly reason?: 'mismatch';
  /** Hash recorded in the package's metadata. */
  readonly storedHash: string;
  /** Hash recomputed from the in-memory metadata + ops. */
  readonly computedHash: string;
}
12
+
13
+ function sha256Hex(input: string): string {
14
+ return createHash('sha256').update(input).digest('hex');
15
+ }
16
+
17
+ /**
18
+ * Content-addressed migration hash over (metadata envelope sans
19
+ * contracts/hints/signature, ops). See ADR 199 — Storage-only migration
20
+ * identity for the rationale: contracts are anchored separately by the
21
+ * storage-hash bookends inside the envelope; planner hints are advisory
22
+ * and must not affect identity.
23
+ *
24
+ * The integrity check is purely structural, not semantic. The function
25
+ * canonicalizes its inputs via `sortKeys` (recursive) + `JSON.stringify`
26
+ * and hashes the result. Target-specific operation payloads (`step.sql`,
27
+ * Mongo's pipeline AST, …) are hashed verbatim — no per-target
28
+ * normalization is required, because what's being verified is "do the
29
+ * on-disk bytes still produce their recorded hash", not "do two
30
+ * semantically-equivalent migrations hash the same". The latter is an
31
+ * emit-drift concern (ADR 192 step 2).
32
+ *
33
+ * The symmetry across write and read holds because `JSON.parse(
34
+ * JSON.stringify(x))` round-trips JSON-safe values losslessly and
35
+ * `sortKeys` is idempotent and deterministic — write-time and read-time
36
+ * canonicalization produce the same canonical bytes regardless of
37
+ * source-side key ordering or whitespace.
38
+ *
39
+ * The `migrationHash` field on the metadata is stripped before hashing
40
+ * so the function can be used both at write time (when no hash exists
41
+ * yet) and at verify time (rehashing an already-attested record).
42
+ */
43
+ export function computeMigrationHash(
44
+ metadata: Omit<MigrationMetadata, 'migrationHash'> & { readonly migrationHash?: string },
45
+ ops: MigrationOps,
46
+ ): string {
47
+ const {
48
+ migrationHash: _migrationHash,
49
+ signature: _signature,
50
+ fromContract: _fromContract,
51
+ toContract: _toContract,
52
+ hints: _hints,
53
+ ...strippedMeta
54
+ } = metadata;
55
+
56
+ const canonicalMetadata = canonicalizeJson(strippedMeta);
57
+ const canonicalOps = canonicalizeJson(ops);
58
+
59
+ const partHashes = [canonicalMetadata, canonicalOps].map(sha256Hex);
60
+ const hash = sha256Hex(canonicalizeJson(partHashes));
61
+
62
+ return `sha256:${hash}`;
63
+ }
64
+
65
+ /**
66
+ * Re-hash an in-memory migration package and compare against the stored
67
+ * `migrationHash`. See `computeMigrationHash` for the canonicalization rules.
68
+ *
69
+ * Returns `{ ok: true }` when the package is internally consistent, or
70
+ * `{ ok: false, reason: 'mismatch', storedHash, computedHash }` when it is
71
+ * not — typically a sign of FS corruption, partial writes, or a post-emit
72
+ * hand edit.
73
+ */
74
+ export function verifyMigrationHash(pkg: OnDiskMigrationPackage): VerifyResult {
75
+ const computed = computeMigrationHash(pkg.metadata, pkg.ops);
76
+
77
+ if (pkg.metadata.migrationHash === computed) {
78
+ return {
79
+ ok: true,
80
+ storedHash: pkg.metadata.migrationHash,
81
+ computedHash: computed,
82
+ };
83
+ }
84
+
85
+ return {
86
+ ok: false,
87
+ reason: 'mismatch',
88
+ storedHash: pkg.metadata.migrationHash,
89
+ computedHash: computed,
90
+ };
91
+ }
@@ -0,0 +1,61 @@
1
+ import type { MigrationPlanOperation } from '@prisma-next/framework-components/control';
2
+ import { errorDuplicateInvariantInEdge, errorInvalidInvariantId } from './errors';
3
+ import type { MigrationOps } from './package';
4
+
5
+ /**
6
+ * Hygiene check for `invariantId`. Rejects empty values plus any
7
+ * whitespace or control character (including Unicode whitespace like
8
+ * NBSP and em space, which are visually identical to ASCII space and
9
+ * routinely sneak in via paste).
10
+ */
11
+ export function validateInvariantId(invariantId: string): boolean {
12
+ if (invariantId.length === 0) return false;
13
+ return !/[\p{Cc}\p{White_Space}]/u.test(invariantId);
14
+ }
15
+
16
+ /**
17
+ * Walk a migration's operations and produce its `providedInvariants`
18
+ * aggregate: the sorted, deduplicated list of `invariantId`s declared
19
+ * by ops in the migration. Ops without an `invariantId` are skipped.
20
+ *
21
+ * Both `data`-class ops (data-transforms, e.g. backfills) and
22
+ * `additive`-class opaque DDL (e.g. cipherstash's vendored EQL bundle
23
+ * via `installEqlBundleOp`) may declare invariantIds: the
24
+ * `operationClass` axis classifies *policy gating* (which kinds of ops
25
+ * a `db init` / `db update` policy permits), while `invariantId`
26
+ * classifies *marker bookkeeping* (which named bundles of work a
27
+ * future regeneration knows to skip). The two concerns are
28
+ * intentionally orthogonal — an extension can ship additive
29
+ * non-IR-derivable DDL (the only way the planner can know the bundle
30
+ * is already applied is via the invariantId on the marker) without
31
+ * needing to mis-classify it as `data`-class.
32
+ *
33
+ * Throws `MIGRATION.INVALID_INVARIANT_ID` on a malformed id and
34
+ * `MIGRATION.DUPLICATE_INVARIANT_IN_EDGE` on duplicates.
35
+ *
36
+ * @see docs/architecture docs/adrs/ADR 212 - Contract spaces.md
37
+ * — extension migrations carry `invariantId`s on additive ops; e.g.
38
+ * cipherstash's `installEqlBundle` and structural `create-*` ops are
39
+ * additive-class but carry `cipherstash:*` invariantIds.
40
+ */
41
+ export function deriveProvidedInvariants(ops: MigrationOps): readonly string[] {
42
+ const seen = new Set<string>();
43
+ for (const op of ops) {
44
+ const invariantId = readInvariantId(op);
45
+ if (invariantId === undefined) continue;
46
+ if (!validateInvariantId(invariantId)) {
47
+ throw errorInvalidInvariantId(invariantId);
48
+ }
49
+ if (seen.has(invariantId)) {
50
+ throw errorDuplicateInvariantInEdge(invariantId);
51
+ }
52
+ seen.add(invariantId);
53
+ }
54
+ return [...seen].sort();
55
+ }
56
+
57
+ function readInvariantId(op: MigrationPlanOperation): string | undefined {
58
+ if (!Object.hasOwn(op, 'invariantId')) return undefined;
59
+ const candidate = (op as { invariantId?: unknown }).invariantId;
60
+ return typeof candidate === 'string' ? candidate : undefined;
61
+ }
package/src/io.ts CHANGED
@@ -1,17 +1,27 @@
1
- import { copyFile, mkdir, readdir, readFile, stat, writeFile } from 'node:fs/promises';
1
+ import { copyFile, mkdir, readdir, readFile, rm, stat, writeFile } from 'node:fs/promises';
2
+ import type {
3
+ MigrationMetadata,
4
+ MigrationPackage,
5
+ } from '@prisma-next/framework-components/control';
2
6
  import { type } from 'arktype';
3
- import { basename, dirname, join } from 'pathe';
7
+ import { basename, dirname, join, resolve } from 'pathe';
8
+ import { canonicalizeJson } from './canonicalize-json';
4
9
  import {
5
10
  errorDirectoryExists,
6
11
  errorInvalidDestName,
7
12
  errorInvalidJson,
8
13
  errorInvalidManifest,
9
14
  errorInvalidSlug,
15
+ errorMigrationHashMismatch,
10
16
  errorMissingFile,
17
+ errorProvidedInvariantsMismatch,
11
18
  } from './errors';
12
- import type { MigrationBundle, MigrationManifest, MigrationOps } from './types';
19
+ import { verifyMigrationHash } from './hash';
20
+ import { deriveProvidedInvariants } from './invariants';
21
+ import { MigrationOpsSchema } from './op-schema';
22
+ import type { MigrationOps, OnDiskMigrationPackage } from './package';
13
23
 
14
- const MANIFEST_FILE = 'migration.json';
24
+ export const MANIFEST_FILE = 'migration.json';
15
25
  const OPS_FILE = 'ops.json';
16
26
  const MAX_SLUG_LENGTH = 64;
17
27
 
@@ -25,15 +35,16 @@ const MigrationHintsSchema = type({
25
35
  plannerVersion: 'string',
26
36
  });
27
37
 
28
- const MigrationManifestSchema = type({
29
- from: 'string',
38
+ const MigrationMetadataSchema = type({
39
+ '+': 'reject',
40
+ from: 'string > 0 | null',
30
41
  to: 'string',
31
- migrationId: 'string',
32
- kind: "'regular' | 'baseline'",
42
+ migrationHash: 'string',
33
43
  fromContract: 'object | null',
34
44
  toContract: 'object',
35
45
  hints: MigrationHintsSchema,
36
46
  labels: 'string[]',
47
+ providedInvariants: 'string[]',
37
48
  'authorship?': type({
38
49
  'author?': 'string',
39
50
  'email?': 'string',
@@ -45,18 +56,9 @@ const MigrationManifestSchema = type({
45
56
  createdAt: 'string',
46
57
  });
47
58
 
48
- const MigrationOpSchema = type({
49
- id: 'string',
50
- label: 'string',
51
- operationClass: "'additive' | 'widening' | 'destructive' | 'data'",
52
- });
53
-
54
- // Intentionally shallow: operation-specific payload validation is owned by planner/runner layers.
55
- const MigrationOpsSchema = MigrationOpSchema.array();
56
-
57
59
  export async function writeMigrationPackage(
58
60
  dir: string,
59
- manifest: MigrationManifest,
61
+ metadata: MigrationMetadata,
60
62
  ops: MigrationOps,
61
63
  ): Promise<void> {
62
64
  await mkdir(dirname(dir), { recursive: true });
@@ -70,10 +72,100 @@ export async function writeMigrationPackage(
70
72
  throw error;
71
73
  }
72
74
 
73
- await writeFile(join(dir, MANIFEST_FILE), JSON.stringify(manifest, null, 2), { flag: 'wx' });
75
+ await writeFile(join(dir, MANIFEST_FILE), JSON.stringify(metadata, null, 2), {
76
+ flag: 'wx',
77
+ });
74
78
  await writeFile(join(dir, OPS_FILE), JSON.stringify(ops, null, 2), { flag: 'wx' });
75
79
  }
76
80
 
81
+ /**
82
+ * Materialise an in-memory {@link MigrationPackage} to a per-space
83
+ * directory on disk.
84
+ *
85
+ * Writes three files under `<targetDir>/<pkg.dirName>/`:
86
+ *
87
+ * - `migration.json` — the manifest (pretty-printed, matches
88
+ * {@link writeMigrationPackage}'s output for byte-for-byte parity with
89
+ * app-space migrations).
90
+ * - `ops.json` — the operation list (pretty-printed).
91
+ * - `contract.json` — the canonical-JSON serialisation of
92
+ * `metadata.toContract`. This is the per-package post-state contract
93
+ * snapshot; the canonicalisation pass guarantees byte-determinism so
94
+ * re-emitting the same package across machines / runs produces an
95
+ * identical file.
96
+ *
97
+ * Distinct verb from the lower-level {@link writeMigrationPackage}
98
+ * (which takes constituent `(metadata, ops)`): callers reading
99
+ * `materialise…` know they are persisting a struct-typed package
100
+ * including its contract-snapshot side car.
101
+ *
102
+ * Overwrite-idempotent: the per-package directory is cleared before
103
+ * each emit, so re-running against the same `targetDir` produces
104
+ * byte-identical contents and never leaves stale files behind. The
105
+ * spec's "re-emitting the same package across runs / machines produces
106
+ * byte-identical files" guarantee (§ 3) covers both same-dir and
107
+ * fresh-dir re-emits. The lower-level {@link writeMigrationPackage}
108
+ * stays strict because the CLI authoring path (`migration plan` /
109
+ * `migration new`) deliberately refuses to clobber an existing
110
+ * authored migration; this helper is the re-emit path that is
111
+ * supposed to converge on a single canonical on-disk shape.
112
+ *
113
+ * @see specs/framework-mechanism.spec.md § 3 — Emission helper (T1.7).
114
+ */
115
+ export async function materialiseMigrationPackage(
116
+ targetDir: string,
117
+ pkg: MigrationPackage,
118
+ ): Promise<void> {
119
+ const dir = join(targetDir, pkg.dirName);
120
+ await rm(dir, { recursive: true, force: true });
121
+ await writeMigrationPackage(dir, pkg.metadata, pkg.ops);
122
+ await writeFile(join(dir, 'contract.json'), `${canonicalizeJson(pkg.metadata.toContract)}\n`, {
123
+ flag: 'wx',
124
+ });
125
+ }
126
+
127
+ /**
128
+ * Idempotent variant of {@link materialiseMigrationPackage}: writes the
129
+ * package only if `<targetDir>/<pkg.dirName>/` does not already exist on
130
+ * disk as a directory; returns `{ written: false }` when the package
131
+ * directory is present (no rewrite, no comparison — by-existence skip).
132
+ *
133
+ * Concretely:
134
+ * - existing directory → skip silently, return `{ written: false }`.
135
+ * - missing path → write three files via {@link materialiseMigrationPackage},
136
+ * return `{ written: true }`.
137
+ * - path exists but is not a directory (file/symlink) → treated as
138
+ * missing; {@link materialiseMigrationPackage} will attempt creation
139
+ * and fail with an appropriate OS error.
140
+ * - any other I/O error from `stat` → propagated unchanged.
141
+ *
142
+ * Used by the CLI's `runContractSpaceExtensionMigrationsPass` to
143
+ * materialise extension migration packages into a project's
144
+ * `migrations/<spaceId>/` directory, and by extension-package tests
145
+ * that mirror the same idempotent-rematerialise property locally
146
+ * without taking a CLI dependency.
147
+ */
148
+ export async function materialiseExtensionMigrationPackageIfMissing(
149
+ targetDir: string,
150
+ pkg: MigrationPackage,
151
+ ): Promise<{ readonly written: boolean }> {
152
+ const pkgDir = join(targetDir, pkg.dirName);
153
+ if (await directoryExists(pkgDir)) {
154
+ return { written: false };
155
+ }
156
+ await materialiseMigrationPackage(targetDir, pkg);
157
+ return { written: true };
158
+ }
159
+
160
+ async function directoryExists(p: string): Promise<boolean> {
161
+ try {
162
+ return (await stat(p)).isDirectory();
163
+ } catch (error) {
164
+ if (hasErrnoCode(error, 'ENOENT')) return false;
165
+ throw error;
166
+ }
167
+ }
168
+
77
169
  /**
78
170
  * Copy a list of files into `destDir`, optionally renaming each one.
79
171
  *
@@ -98,27 +190,28 @@ export async function copyFilesWithRename(
98
190
  }
99
191
  }
100
192
 
101
- export async function writeMigrationManifest(
193
+ export async function writeMigrationMetadata(
102
194
  dir: string,
103
- manifest: MigrationManifest,
195
+ metadata: MigrationMetadata,
104
196
  ): Promise<void> {
105
- await writeFile(join(dir, MANIFEST_FILE), `${JSON.stringify(manifest, null, 2)}\n`);
197
+ await writeFile(join(dir, MANIFEST_FILE), `${JSON.stringify(metadata, null, 2)}\n`);
106
198
  }
107
199
 
108
200
  export async function writeMigrationOps(dir: string, ops: MigrationOps): Promise<void> {
109
201
  await writeFile(join(dir, OPS_FILE), `${JSON.stringify(ops, null, 2)}\n`);
110
202
  }
111
203
 
112
- export async function readMigrationPackage(dir: string): Promise<MigrationBundle> {
113
- const manifestPath = join(dir, MANIFEST_FILE);
114
- const opsPath = join(dir, OPS_FILE);
204
+ export async function readMigrationPackage(dir: string): Promise<OnDiskMigrationPackage> {
205
+ const absoluteDir = resolve(dir);
206
+ const manifestPath = join(absoluteDir, MANIFEST_FILE);
207
+ const opsPath = join(absoluteDir, OPS_FILE);
115
208
 
116
209
  let manifestRaw: string;
117
210
  try {
118
211
  manifestRaw = await readFile(manifestPath, 'utf-8');
119
212
  } catch (error) {
120
213
  if (hasErrnoCode(error, 'ENOENT')) {
121
- throw errorMissingFile(MANIFEST_FILE, dir);
214
+ throw errorMissingFile(MANIFEST_FILE, absoluteDir);
122
215
  }
123
216
  throw error;
124
217
  }
@@ -128,14 +221,14 @@ export async function readMigrationPackage(dir: string): Promise<MigrationBundle
128
221
  opsRaw = await readFile(opsPath, 'utf-8');
129
222
  } catch (error) {
130
223
  if (hasErrnoCode(error, 'ENOENT')) {
131
- throw errorMissingFile(OPS_FILE, dir);
224
+ throw errorMissingFile(OPS_FILE, absoluteDir);
132
225
  }
133
226
  throw error;
134
227
  }
135
228
 
136
- let manifest: MigrationManifest;
229
+ let metadata: MigrationMetadata;
137
230
  try {
138
- manifest = JSON.parse(manifestRaw);
231
+ metadata = JSON.parse(manifestRaw);
139
232
  } catch (e) {
140
233
  throw errorInvalidJson(manifestPath, e instanceof Error ? e.message : String(e));
141
234
  }
@@ -147,22 +240,52 @@ export async function readMigrationPackage(dir: string): Promise<MigrationBundle
147
240
  throw errorInvalidJson(opsPath, e instanceof Error ? e.message : String(e));
148
241
  }
149
242
 
150
- validateManifest(manifest, manifestPath);
243
+ validateMetadata(metadata, manifestPath);
151
244
  validateOps(ops, opsPath);
152
245
 
153
- return {
154
- dirName: basename(dir),
155
- dirPath: dir,
156
- manifest,
246
+ // Re-derive before the hash check so format/duplicate diagnostics
247
+ // fire with their dedicated codes rather than as a generic hash mismatch.
248
+ const derivedInvariants = deriveProvidedInvariants(ops);
249
+ if (!arraysEqual(metadata.providedInvariants, derivedInvariants)) {
250
+ throw errorProvidedInvariantsMismatch(
251
+ manifestPath,
252
+ metadata.providedInvariants,
253
+ derivedInvariants,
254
+ );
255
+ }
256
+
257
+ const pkg: OnDiskMigrationPackage = {
258
+ dirName: basename(absoluteDir),
259
+ dirPath: absoluteDir,
260
+ metadata,
157
261
  ops,
158
262
  };
263
+
264
+ const verification = verifyMigrationHash(pkg);
265
+ if (!verification.ok) {
266
+ throw errorMigrationHashMismatch(
267
+ absoluteDir,
268
+ verification.storedHash,
269
+ verification.computedHash,
270
+ );
271
+ }
272
+
273
+ return pkg;
274
+ }
275
+
276
+ function arraysEqual(a: readonly string[], b: readonly string[]): boolean {
277
+ if (a.length !== b.length) return false;
278
+ for (let i = 0; i < a.length; i++) {
279
+ if (a[i] !== b[i]) return false;
280
+ }
281
+ return true;
159
282
  }
160
283
 
161
- function validateManifest(
162
- manifest: unknown,
284
+ function validateMetadata(
285
+ metadata: unknown,
163
286
  filePath: string,
164
- ): asserts manifest is MigrationManifest {
165
- const result = MigrationManifestSchema(manifest);
287
+ ): asserts metadata is MigrationMetadata {
288
+ const result = MigrationMetadataSchema(metadata);
166
289
  if (result instanceof type.errors) {
167
290
  throw errorInvalidManifest(filePath, result.summary);
168
291
  }
@@ -177,7 +300,7 @@ function validateOps(ops: unknown, filePath: string): asserts ops is MigrationOp
177
300
 
178
301
  export async function readMigrationsDir(
179
302
  migrationsRoot: string,
180
- ): Promise<readonly MigrationBundle[]> {
303
+ ): Promise<readonly OnDiskMigrationPackage[]> {
181
304
  let entries: string[];
182
305
  try {
183
306
  entries = await readdir(migrationsRoot);
@@ -188,7 +311,7 @@ export async function readMigrationsDir(
188
311
  throw error;
189
312
  }
190
313
 
191
- const packages: MigrationBundle[] = [];
314
+ const packages: OnDiskMigrationPackage[] = [];
192
315
 
193
316
  for (const entry of entries.sort()) {
194
317
  const entryPath = join(migrationsRoot, entry);
@@ -0,0 +1 @@
1
+ export type { MigrationHints, MigrationMetadata } from '@prisma-next/framework-components/control';