@vltpkg/graph 1.0.0-rc.23 → 1.0.0-rc.25
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/actual/load.d.ts +107 -0
- package/dist/actual/load.js +336 -0
- package/dist/browser.d.ts +14 -0
- package/dist/browser.js +16 -0
- package/dist/build.d.ts +28 -0
- package/dist/build.js +78 -0
- package/dist/dependencies.d.ts +65 -0
- package/dist/dependencies.js +111 -0
- package/dist/diff.d.ts +119 -0
- package/dist/diff.js +151 -0
- package/dist/edge.d.ts +46 -0
- package/dist/edge.js +77 -0
- package/dist/fixup-added-names.d.ts +18 -0
- package/dist/fixup-added-names.js +46 -0
- package/dist/graph.d.ts +153 -0
- package/dist/graph.js +444 -0
- package/dist/ideal/append-nodes.d.ts +31 -0
- package/dist/ideal/append-nodes.js +560 -0
- package/dist/ideal/build-ideal-from-starting-graph.d.ts +14 -0
- package/dist/ideal/build-ideal-from-starting-graph.js +69 -0
- package/dist/ideal/build.d.ts +40 -0
- package/dist/ideal/build.js +84 -0
- package/dist/ideal/get-importer-specs.d.ts +20 -0
- package/dist/ideal/get-importer-specs.js +180 -0
- package/dist/ideal/peers.d.ts +160 -0
- package/dist/ideal/peers.js +696 -0
- package/dist/ideal/refresh-ideal-graph.d.ts +43 -0
- package/dist/ideal/refresh-ideal-graph.js +62 -0
- package/dist/ideal/remove-satisfied-specs.d.ts +7 -0
- package/dist/ideal/remove-satisfied-specs.js +34 -0
- package/dist/ideal/sorting.d.ts +45 -0
- package/dist/ideal/sorting.js +70 -0
- package/dist/ideal/types.d.ts +107 -0
- package/dist/ideal/types.js +1 -0
- package/dist/index.d.ts +38 -0
- package/dist/index.js +32 -0
- package/dist/install.d.ts +19 -0
- package/dist/install.js +208 -0
- package/dist/lockfile/load-edges.d.ts +11 -0
- package/dist/lockfile/load-edges.js +105 -0
- package/dist/lockfile/load-nodes.d.ts +4 -0
- package/dist/lockfile/load-nodes.js +101 -0
- package/dist/lockfile/load.d.ts +45 -0
- package/dist/lockfile/load.js +84 -0
- package/dist/lockfile/save.d.ts +30 -0
- package/dist/lockfile/save.js +174 -0
- package/dist/lockfile/types.d.ts +95 -0
- package/dist/lockfile/types.js +49 -0
- package/dist/modifiers.d.ts +188 -0
- package/dist/modifiers.js +329 -0
- package/dist/node.d.ts +234 -0
- package/dist/node.js +388 -0
- package/dist/non-empty-list.d.ts +2 -0
- package/dist/non-empty-list.js +2 -0
- package/dist/reify/add-edge.d.ts +9 -0
- package/dist/reify/add-edge.js +71 -0
- package/dist/reify/add-edges.d.ts +4 -0
- package/dist/reify/add-edges.js +12 -0
- package/dist/reify/add-nodes.d.ts +6 -0
- package/dist/reify/add-nodes.js +16 -0
- package/dist/reify/bin-chmod.d.ts +10 -0
- package/dist/reify/bin-chmod.js +38 -0
- package/dist/reify/build.d.ts +13 -0
- package/dist/reify/build.js +111 -0
- package/dist/reify/calculate-save-value.d.ts +2 -0
- package/dist/reify/calculate-save-value.js +50 -0
- package/dist/reify/check-needed-build.d.ts +34 -0
- package/dist/reify/check-needed-build.js +71 -0
- package/dist/reify/delete-edge.d.ts +4 -0
- package/dist/reify/delete-edge.js +27 -0
- package/dist/reify/delete-edges.d.ts +4 -0
- package/dist/reify/delete-edges.js +13 -0
- package/dist/reify/delete-nodes.d.ts +4 -0
- package/dist/reify/delete-nodes.js +15 -0
- package/dist/reify/extract-node.d.ts +23 -0
- package/dist/reify/extract-node.js +83 -0
- package/dist/reify/index.d.ts +34 -0
- package/dist/reify/index.js +161 -0
- package/dist/reify/internal-hoist.d.ts +8 -0
- package/dist/reify/internal-hoist.js +133 -0
- package/dist/reify/optional-fail.d.ts +15 -0
- package/dist/reify/optional-fail.js +15 -0
- package/dist/reify/rollback.d.ts +4 -0
- package/dist/reify/rollback.js +23 -0
- package/dist/reify/update-importers-package-json.d.ts +35 -0
- package/dist/reify/update-importers-package-json.js +122 -0
- package/dist/remove-optional-subgraph.d.ts +33 -0
- package/dist/remove-optional-subgraph.js +47 -0
- package/dist/resolve-save-type.d.ts +5 -0
- package/dist/resolve-save-type.js +4 -0
- package/dist/stringify-node.d.ts +2 -0
- package/dist/stringify-node.js +32 -0
- package/dist/transfer-data/load.d.ts +43 -0
- package/dist/transfer-data/load.js +175 -0
- package/dist/uninstall.d.ts +14 -0
- package/dist/uninstall.js +75 -0
- package/dist/update.d.ts +12 -0
- package/dist/update.js +73 -0
- package/dist/virtual-root.d.ts +15 -0
- package/dist/virtual-root.js +78 -0
- package/dist/visualization/human-readable-output.d.ts +26 -0
- package/dist/visualization/human-readable-output.js +163 -0
- package/dist/visualization/json-output.d.ts +41 -0
- package/dist/visualization/json-output.js +50 -0
- package/dist/visualization/mermaid-output.d.ts +17 -0
- package/dist/visualization/mermaid-output.js +170 -0
- package/dist/visualization/object-like-output.d.ts +2 -0
- package/dist/visualization/object-like-output.js +47 -0
- package/package.json +22 -22
|
@@ -0,0 +1,696 @@
|
|
|
1
|
+
// helpers for managing peer dependency resolution
|
|
2
|
+
// during the ideal graph building process.
|
|
3
|
+
import { intersects } from '@vltpkg/semver';
|
|
4
|
+
import { satisfies } from '@vltpkg/satisfies';
|
|
5
|
+
import { Spec } from '@vltpkg/spec';
|
|
6
|
+
import { getDependencies, shorten } from "../dependencies.js";
|
|
7
|
+
import { compareByType, getOrderedDependencies } from "./sorting.js";
|
|
8
|
+
import { longDependencyTypes } from '@vltpkg/types';
|
|
9
|
+
/**
 * Determine whether `node` is an acceptable resolution for `spec` when the
 * dependency is declared by `fromNode`.
 *
 * Thin wrapper around `satisfies()` forwarding the contextual arguments the
 * check needs:
 * - `node.id`: the DepID of the candidate node
 * - `spec`: the spec to satisfy (e.g. `^18.0.0`)
 * - `fromNode.location`: where the dependency is declared (affects file: specs)
 * - `fromNode.projectRoot`: for resolving workspace specs
 * - `graph.monorepo`: for workspace-aware resolution
 */
const nodeSatisfiesSpec = (node, spec, fromNode, graph) => {
    const { location, projectRoot } = fromNode;
    return satisfies(node.id, spec, location, projectRoot, graph.monorepo);
};
|
|
21
|
+
/**
 * Parse `bareSpec` into a Spec using a parent node's registry context.
 *
 * Inherits registry configuration from `graph.mainImporter.options` so that
 * scope-registry and custom registry mappings stay consistent, while the
 * `fromNode.registry` override lets scoped packages use their configured
 * registry.
 */
const parseSpec = (name, bareSpec, fromNode, graph) => {
    const options = {
        ...graph.mainImporter.options,
        registry: fromNode.registry,
    };
    return Spec.parse(name, bareSpec, options);
};
|
|
32
|
+
/**
 * Build a cache key identifying a peer context fork operation.
 *
 * Shape: `{baseIndex}::{signature;signature;…}` where:
 * - `baseIndex` is the index of the context being forked (0 for the initial
 *   context, whose `index` property is unset)
 * - each entry signature is `{name}|{type}|{targetId-or-∅}|{spec}`
 *
 * Signatures are sorted so the key does not depend on entry order. Identical
 * fork requests against the same base context then resolve to the same cached
 * context instead of creating duplicates.
 */
const getForkKey = (peerContext, entries) => {
    const baseIndex = peerContext.index ?? 0;
    const signatures = [];
    for (const { spec, type, target } of entries) {
        signatures.push(`${spec.final.name}|${type}|${target?.id ?? '∅'}|${spec}`);
    }
    signatures.sort();
    return `${baseIndex}::${signatures.join(';')}`;
};
|
|
50
|
+
/**
 * Decide whether a peer-context mismatch for `peerName` can be safely
 * ignored because the parent (`fromNode`) declares its own dependency on
 * that package which the context's target does not satisfy.
 *
 * This prevents cross-importer peer context leakage. Example:
 * - Root importer has `react@^18` in the peer context
 * - Workspace A declares `react@^19` as a dependency
 * - When checking compatibility for Workspace A's deps, the `react@^18`
 *   context entry should not force a fork, because Workspace A resolves its
 *   own react anyway.
 *
 * Returns true when the mismatch should be ignored.
 */
const shouldIgnoreContextMismatch = (peerName, contextTarget, fromNode, graph) => {
    const { manifest } = fromNode;
    /* c8 ignore next - edge case: fromNode always has manifest in practice */
    if (!manifest)
        return false;
    // find the first dependency type under which the parent declares peerName
    for (const depType of longDependencyTypes) {
        const declaredSpec = manifest[depType]?.[peerName];
        if (!declaredSpec)
            continue;
        // the parent declares this package: the mismatch is ignorable exactly
        // when the context target fails the parent's own declared spec, since
        // the context entry would then never apply to this parent
        const parsed = parseSpec(peerName, declaredSpec, fromNode, graph);
        return !nodeSatisfiesSpec(contextTarget, parsed, fromNode, graph);
    }
    return false;
};
|
|
81
|
+
/**
 * Build an incompatible result when `target` actually satisfies `peerSpec`.
 *
 * Only a target that satisfies the peer spec is a valid alternative capable
 * of conflicting with the existing node's peer edge target; a target that
 * does not satisfy the spec is not a valid peer resolution, so there is no
 * conflict to report and `undefined` is returned instead.
 *
 * The returned `forkEntry` carries the conflicting spec and target, later
 * used to create a forked peer context with the alternative resolution.
 */
const buildIncompatibleResult = (target, peerSpec, type, fromNode, graph) => {
    const isValidAlternative = nodeSatisfiesSpec(target, peerSpec, fromNode, graph);
    if (!isValidAlternative)
        return undefined;
    return {
        compatible: false,
        forkEntry: { spec: peerSpec, target, type },
    };
};
|
|
103
|
+
/**
 * Check if an existing node's peer edges would still resolve to the same
 * targets from a new parent's context. Returns incompatible info if any
 * peer would resolve differently, meaning the node should NOT be reused.
 *
 * This is crucial for avoiding incorrect node reuse that would break peer
 * dependency contracts. Three sources of conflict are checked:
 *
 * 1. **Peer context entries**: The global peer context may have resolved a
 *    different version of a peer dependency than what the existing node expects.
 *
 * 2. **Already-placed siblings**: The parent node may already have an edge to
 *    a different version of the peer dependency.
 *
 * 3. **Not-yet-placed siblings**: The parent's manifest declares a dependency
 *    on the same package, and there's a graph node that would satisfy it but
 *    differs from what the existing node expects.
 *
 * @param existingNode - candidate node for reuse; its peerDependencies drive the checks
 * @param fromNode - the parent that would gain an edge to existingNode
 * @param peerContext - map of peer name to context entry for the current placement
 * @param graph - the graph under construction (provides importer options and monorepo)
 * @returns `{ compatible: true }` when reuse is safe; otherwise
 *   `{ compatible: false }` optionally carrying a `forkEntry` describing the
 *   conflicting alternative resolution
 */
export const checkPeerEdgesCompatible = (existingNode, fromNode, peerContext, graph) => {
    const peerDeps = existingNode.manifest?.peerDependencies;
    // No peer deps = always compatible
    if (!peerDeps || Object.keys(peerDeps).length === 0) {
        return { compatible: true };
    }
    // Per-call memoization: avoid repeated satisfies() calls with identical args
    const parseOpts = {
        ...graph.mainImporter.options,
        registry: fromNode.registry,
    };
    // hoisted once: all satisfies() calls below share the same declaring context
    const fromLocation = fromNode.location;
    const projectRoot = fromNode.projectRoot;
    const monorepo = graph.monorepo;
    const satisfiesMemo = new Map();
    // memoized satisfies() keyed by node id + spec identity (\0-delimited to
    // avoid collisions between key components)
    const satisfiesNodeSpec = (node, spec) => {
        const key = `${node.id}\0${spec.type}\0${spec.bareSpec}\0${String(spec.final)}`;
        let result = satisfiesMemo.get(key);
        if (result === undefined) {
            result = satisfies(node.id, spec, fromLocation, projectRoot, monorepo);
            satisfiesMemo.set(key, result);
        }
        return result;
    };
    // true only when node satisfies every spec in the iterable
    const nodeSatisfiesAll = (node, specs) => {
        for (const s of specs) {
            if (!satisfiesNodeSpec(node, s))
                return false;
        }
        return true;
    };
    for (const peerName in peerDeps) {
        const peerBareSpec = peerDeps[peerName];
        /* c8 ignore next - peerDeps[peerName] is always defined when iterating */
        if (!peerBareSpec)
            continue;
        const existingEdge = existingNode.edgesOut.get(peerName);
        // CHECK 0: Reject reuse if peer edge doesn't exist yet (node unprocessed).
        // Cannot verify compatibility since peer resolution depends on original
        // placement context, which may differ from current parent's context.
        // Note: Dangling edges (edge exists, no target) are handled separately below.
        // This conservative check prevents incorrect reuse when placement order varies.
        if (existingEdge === undefined) {
            return { compatible: false };
        }
        // Dangling peer edge (edge exists but unresolved) - skip, nothing to conflict with
        if (!existingEdge.to)
            continue;
        const peerSpec = Spec.parse(peerName, peerBareSpec, parseOpts);
        // CHECK 1: Does peer context have a different target for this peer?
        const contextEntry = peerContext.get(peerName);
        if (contextEntry?.target &&
            contextEntry.target.id !== existingEdge.to.id &&
            !shouldIgnoreContextMismatch(peerName, contextEntry.target, fromNode, graph)) {
            // If existing edge target still satisfies the peer spec, no real conflict.
            // The existing resolution is still valid even if context has a different target.
            // This ensures idempotency when loading from lockfile where peer contexts
            // are rebuilt fresh but existing nodes have valid peer resolutions.
            const existingTarget = existingEdge.to;
            const existingTargetSatisfiesPeer = satisfiesNodeSpec(existingTarget, peerSpec);
            if (existingTargetSatisfiesPeer &&
                nodeSatisfiesAll(existingTarget, contextEntry.specs)) {
                continue; // Truly no conflict
            }
            const result = buildIncompatibleResult(contextEntry.target, peerSpec, contextEntry.type, fromNode, graph);
            if (result)
                return result;
        }
        // CHECK 2: Does parent already have an edge to a different version?
        const siblingEdge = fromNode.edgesOut.get(peerName);
        if (siblingEdge?.to && siblingEdge.to.id !== existingEdge.to.id) {
            // If existing edge target still satisfies the peer spec, no real conflict.
            // Both sibling and existing targets may be valid - prefer keeping existing.
            const existingTarget = existingEdge.to;
            const existingTargetSatisfiesPeer = satisfiesNodeSpec(existingTarget, peerSpec);
            if (existingTargetSatisfiesPeer &&
                satisfiesNodeSpec(existingTarget, siblingEdge.spec)) {
                continue; // Truly no conflict
            }
            const result = buildIncompatibleResult(siblingEdge.to, peerSpec, siblingEdge.type, fromNode, graph);
            if (result)
                return result;
        }
        // CHECK 3: Does parent's manifest declare this peer, with a different
        // satisfying node already in the graph?
        const manifest = fromNode.manifest;
        let declared;
        let declaredType;
        if (manifest) {
            // scan all dependency types; first match wins
            for (const depType of longDependencyTypes) {
                const deps = manifest[depType];
                if (deps &&
                    typeof deps === 'object' &&
                    !Array.isArray(deps) &&
                    peerName in deps) {
                    declared = deps[peerName];
                    declaredType = depType;
                    break;
                }
            }
        }
        if (declared && declaredType) {
            const parentSpec = Spec.parse(peerName, declared, parseOpts);
            // If existing edge target already satisfies parent's declared spec,
            // there's no conflict - the parent can use the same node as the existing
            // peer edge. Only search for alternatives if existing target is incompatible.
            if (satisfiesNodeSpec(existingEdge.to, parentSpec)) {
                continue; // Existing target works for parent too, no conflict
            }
            // Use nodesByName (deterministic DepID order) instead of full graph scan
            const candidates = graph.nodesByName.get(peerName);
            if (candidates) {
                for (const candidateNode of candidates) {
                    // a conflict requires a DIFFERENT node satisfying both the
                    // parent's declared spec and the peer spec
                    if (candidateNode.id !== existingEdge.to.id &&
                        satisfiesNodeSpec(candidateNode, parentSpec) &&
                        satisfiesNodeSpec(candidateNode, peerSpec)) {
                        return {
                            compatible: false,
                            forkEntry: {
                                spec: peerSpec,
                                target: candidateNode,
                                type: shorten(declaredType),
                            },
                        };
                    }
                }
                /* c8 ignore next */
            }
        }
    }
    return { compatible: true };
};
|
|
253
|
+
/**
 * Retrieve a unique hash value for a given peer context set.
 *
 * The initial peer context has no `index` (or index 0), and intentionally
 * gets no hash so no ref is created for it; forked contexts hash to
 * `peer.{index}`.
 */
export const retrievePeerContextHash = (peerContext) => {
    const index = peerContext?.index;
    // the initial context (missing or zero index) produces no hash
    if (!index)
        return undefined;
    return `peer.${index}`;
};
|
|
262
|
+
/**
 * Checks if a given spec is compatible with the specs already assigned to a
 * peer context entry.
 *
 * Returns true if INCOMPATIBLE, false if compatible.
 *
 * Compatibility rules:
 * - **Registry specs**: semver range intersection. `^18.0.0` and `^18.2.0`
 *   intersect (compatible) but `^18.0.0` and `^19.0.0` do not; a spec with a
 *   missing range is treated as incompatible.
 * - **Non-registry specs** (git, file, etc.): requires an exact bareSpec
 *   match — `github:foo/bar#v1` only matches itself.
 *
 * Used to determine when peer context forking is needed: incompatible specs
 * mean a new peer context must be created.
 */
export const incompatibleSpecs = (spec, entry) => {
    for (const existing of entry.specs) {
        const s = existing.final;
        if (spec.type === 'registry') {
            // registry specs: both ranges must exist and intersect
            if (!spec.range || !s.range || !intersects(spec.range, s.range)) {
                return true;
            }
        }
        else if (spec.bareSpec !== s.bareSpec) {
            // non-registry specs: exact bareSpec match required
            return true;
        }
    }
    return false;
};
|
|
295
|
+
/**
 * Return a sorted copy of the given peer context entry inputs so they are
 * processed deterministically: ordering is delegated to `compareByType`
 * (non-peer dependencies before peer dependencies, alphabetical by name).
 * The input iterable is never mutated.
 */
export const getOrderedPeerContextEntries = (entries) => Array.from(entries).sort(compareByType);
|
|
300
|
+
/*
 * Checks if there are any conflicting versions for a given dependency
 * to be added to a peer context set which will require forking.
 *
 * Only active context entries participate in conflict detection; inactive
 * or absent entries are skipped.
 *
 * Returns true if forking is needed, false otherwise.
 */
export const checkEntriesToPeerContext = (peerContext, entries) => {
    for (const { spec, target } of entries) {
        const entryName = target?.name ?? spec.final.name;
        const existing = peerContext.get(entryName);
        // inactive or missing entries cannot conflict
        if (!existing?.active)
            continue;
        // any spec incompatible with the accumulated specs forces a fork
        if (incompatibleSpecs(spec.final, existing))
            return true;
    }
    return false;
};
|
|
321
|
+
/**
 * Add or update dependencies in a given peer context making sure to check
 * for compatibility with existing dependencies already resolved by a given
 * peer context set. Extra info such as a target or dependent nodes is
 * optional.
 *
 * Mutates `peerContext` in place, and may also re-point existing graph edges
 * of recorded dependents when a better target is adopted.
 *
 * @param peerContext - map of name to entry being updated in place
 * @param entries - `{ dependent?, spec, target?, type }` records to merge in
 * @param fromNode - declaring node; its location/projectRoot feed satisfies()
 * @param monorepo - passed through to satisfies() for workspace resolution
 * @returns true if forking is needed (a conflict was found), false otherwise
 */
export const addEntriesToPeerContext = (peerContext, entries, fromNode, monorepo) => {
    // pre check for conflicts before processing
    if (checkEntriesToPeerContext(peerContext, entries))
        return true;
    for (const { dependent, spec, target, type } of entries) {
        const name = target?.name ?? spec.final.name;
        let entry = peerContext.get(name);
        // create new entry if none exists
        if (!entry) {
            entry = {
                active: true,
                specs: new Set([spec]),
                target,
                type,
                contextDependents: new Set(),
            };
            peerContext.set(name, entry);
            if (dependent)
                entry.contextDependents.add(dependent);
            continue;
        }
        // check for sibling dep conflicts
        if (incompatibleSpecs(spec.final, entry))
            return true;
        // update target if compatible with all specs
        if (target &&
            [...entry.specs].every(s => satisfies(target.id, s, fromNode.location, fromNode.projectRoot, monorepo))) {
            // NOTE(review): re-targeting requires BOTH id AND version to differ
            // from the current target; an id-only difference keeps the existing
            // target untouched — confirm this asymmetry is intended
            if (target.id !== entry.target?.id &&
                target.version !== entry.target?.version) {
                // update dependents to point to new target
                for (const dep of entry.contextDependents) {
                    const edge = dep.edgesOut.get(name);
                    if (edge?.to && edge.to !== target) {
                        // re-wire the edge: detach from old target, attach to new
                        edge.to.edgesIn.delete(edge);
                        edge.to = target;
                        target.edgesIn.add(edge);
                    }
                }
                entry.target = target;
            }
            // fill in a target when the entry had none yet
            entry.target ??= target;
        }
        entry.specs.add(spec);
        if (dependent)
            entry.contextDependents.add(dependent);
    }
    return false;
};
|
|
377
|
+
/**
 * Create and returns a forked copy of a given peer context set.
 *
 * Fork results are cached by a key derived from the base context index and
 * the entry signatures (see getForkKey), so identical fork requests reuse
 * the same forked context. New contexts are registered on the graph at
 * `graph.peerContexts[index]`.
 *
 * @param graph - owns the context index counter, registry and fork cache
 * @param peerContext - the base context being forked
 * @param entries - the entries that were incompatible with the base context
 * @returns the cached or newly created forked peer context map
 */
export const forkPeerContext = (graph, peerContext, entries) => {
    const forkKey = getForkKey(peerContext, entries);
    const cached = graph.peerContextForkCache.get(forkKey);
    if (cached) {
        return cached;
    }
    // create a new peer context set
    const nextPeerContext = new Map();
    nextPeerContext.index = graph.nextPeerContextIndex();
    // register it in the graph
    graph.peerContexts[nextPeerContext.index] = nextPeerContext;
    graph.peerContextForkCache.set(forkKey, nextPeerContext);
    // copy existing entries marking them as inactive, it's also important
    // to note that specs and contextDependents are new objects so that changes
    // to those in the new context do not affect the previous one
    for (const [name, entry] of peerContext.entries()) {
        nextPeerContext.set(name, {
            active: false,
            specs: new Set(entry.specs),
            // targets are intentionally dropped in the fork; they get
            // re-resolved within the new context
            target: undefined,
            type: entry.type,
            contextDependents: new Set(entry.contextDependents),
        });
    }
    // add the new entries to this peer context set, marking them as active
    // these are the entries that were incompatible with the previous context set
    for (const entry of entries) {
        const { dependent, spec, target, type } = entry;
        const name = target?.name /* c8 ignore next */ ?? spec.final.name;
        const newEntry = {
            active: true,
            specs: new Set([spec]),
            target,
            type,
            contextDependents: dependent ? new Set([dependent]) : new Set(),
        };
        // new entries overwrite any copied inactive entry of the same name
        nextPeerContext.set(name, newEntry);
    }
    return nextPeerContext;
};
|
|
420
|
+
/**
 * Find a provider for peer `name` inside the peer-edge closure of the queued
 * entries' resolved targets, breadth-first.
 *
 * This handles peer dependency cycles like `@isaacs/peer-dep-cycle-a/b/c`:
 * - A depends on B (peer)
 * - B depends on C (peer)
 * - C depends on A (peer)
 *
 * Traversal:
 * 1. Seed the queue with every resolved target from `queuedEntries` (siblings)
 * 2. For each node, return its edge target for `name` if it satisfies `peerSpec`
 * 3. Otherwise enqueue its peer/peerOptional edge targets and keep searching
 *
 * A seen-set guarantees termination on cyclic peer graphs. Depth is tracked
 * per queue item but no depth cutoff is enforced. Preferring providers found
 * here ("local", via sibling peer edges) takes priority over the global
 * context.
 */
const findFromPeerClosure = (name, peerSpec, queuedEntries, fromNode, graph) => {
    const visited = new Set();
    // FIFO queue seeded with all resolved sibling targets
    const queue = [];
    for (const entry of queuedEntries) {
        if (entry.target) {
            queue.push({ n: entry.target, depth: 0 });
        }
    }
    // index-pointer BFS (avoids O(n) Array#shift on each dequeue)
    for (let i = 0; i < queue.length; i++) {
        const { n: current, depth } = queue[i];
        if (visited.has(current.id))
            continue;
        visited.add(current.id);
        // does this node provide the peer we are looking for?
        const providerEdge = current.edgesOut.get(name);
        if (providerEdge?.to &&
            nodeSatisfiesSpec(providerEdge.to, peerSpec, fromNode, graph)) {
            return providerEdge.to;
        }
        // expand only along peer edges to stay inside the peer closure
        for (const e of current.edgesOut.values()) {
            if (e.to && (e.type === 'peer' || e.type === 'peerOptional')) {
                queue.push({ n: e.to, depth: depth + 1 });
            }
        }
    }
    return undefined;
};
|
|
465
|
+
/**
 * Starts the peer dependency placement process for a given node being
 * processed and placed.
 *
 * When the manifest declares peer dependencies, this gathers the entries
 * that will later be merged into the peer context: the parent's declared
 * dependencies (possibly not yet parsed) plus its already-resolved outgoing
 * edges, deduplicated by name with resolved edges taking precedence.
 *
 * @returns `{ peerSetHash, queuedEntries }` where peerSetHash identifies a
 *   forked peer context (undefined for the initial one or when the manifest
 *   has no peer deps) and queuedEntries is deterministically ordered.
 */
export const startPeerPlacement = (peerContext, manifest, fromNode, options) => {
    // dedupe by name: manifest-declared deps first, then resolved edges,
    // so an already-parsed edge overrides the raw json dep entry
    const queueMap = new Map();
    let peerSetHash;
    const peerDeps = manifest.peerDependencies;
    if (peerDeps && Object.keys(peerDeps).length > 0) {
        // only nodes with peer dependencies get a peer context set hash
        peerSetHash = retrievePeerContextHash(peerContext);
        // sibling dependencies from the parent that may not be parsed yet
        const siblingDeps = getDependencies(fromNode, {
            ...options,
            registry: fromNode.registry,
        });
        for (const [depName, dep] of siblingDeps) {
            queueMap.set(depName, dep);
        }
        // already parsed edges override the raw manifest entries
        for (const edge of fromNode.edgesOut.values()) {
            queueMap.set(edge.name, {
                spec: edge.spec,
                target: edge.to,
                type: edge.type,
            });
        }
    }
    return {
        peerSetHash,
        // sorted for deterministic processing order
        queuedEntries: getOrderedPeerContextEntries([...queueMap.values()]),
    };
};
|
|
507
|
+
/**
|
|
508
|
+
* Ends the peer dependency placement process, returning the functions that
|
|
509
|
+
* are going to be used to update the peer context set, forking when needed
|
|
510
|
+
* and resolving peer dependencies if possible.
|
|
511
|
+
*
|
|
512
|
+
* Returns two deferred functions:
|
|
513
|
+
* - `putEntries()`: Adds entries to peer context; returns fork entries if conflict
|
|
514
|
+
* - `resolvePeerDeps()`: Resolves peer deps from context/siblings or adds to nextDeps
|
|
515
|
+
*
|
|
516
|
+
* These are deferred (not executed immediately) so that all siblings at a level
|
|
517
|
+
* can be processed before peer context updates, enabling context reuse optimization.
|
|
518
|
+
*/
|
|
519
|
+
// Builds the pair of deferred peer-placement callbacks for a just-placed
// `node`. Nothing here runs immediately: the callbacks are invoked later by
// postPlacementPeerCheck so that every sibling at a BFS level is placed
// before peer contexts are updated/forked.
// NOTE(review): parameter shapes (peerContext map, dep entry objects with
// { spec, type, target?, dependent? }) are assumed from usage here — confirm
// against the callers elsewhere in this file.
export const endPeerPlacement = (peerContext, nextDeps, nextPeerDeps, graph, spec, fromNode, node, type, queuedEntries) => ({
    /**
     * Add the new entries to the current peer context set.
     *
     * Two sets of entries are checked:
     * - `prevEntries`: Parent's queued entries + self-reference
     * - `nextEntries`: This node's deps + peer deps (with node as dependent)
     *
     * If either conflicts with the current context, returns ALL entries to be
     * added to a forked context (prevEntries last for priority).
     *
     * Returns `undefined` if no fork needed — in that case the entries have
     * been added directly to `peerContext` (this call mutates it).
     */
    putEntries: () => {
        // add queued entries from this node parents along
        // with a self-ref to the current peer context set
        const prevEntries = [
            ...queuedEntries,
            /* ref itself */ {
                spec,
                target: node,
                type,
            },
        ];
        // every dep of this node becomes an entry with `node` as its dependent
        const nextEntries = [
            ...nextDeps.map(dep => ({ ...dep, dependent: node })),
            ...[...nextPeerDeps.values()].map(dep => ({
                ...dep,
                dependent: node,
            })),
        ];
        const conflictPrev = checkEntriesToPeerContext(peerContext, prevEntries);
        // skip the conflict scan entirely when there are no next entries
        const conflictNext = nextEntries.length > 0 &&
            checkEntriesToPeerContext(peerContext, nextEntries);
        if (conflictPrev || conflictNext) {
            // returns all entries that need to be added to a forked context
            // giving priority to parent entries (prevEntries) by placing them last
            return [...nextEntries, ...prevEntries];
        }
        // no conflict: mutate the shared context in place, attributing
        // prevEntries to the parent and nextEntries to this node
        addEntriesToPeerContext(peerContext, prevEntries, fromNode, graph.monorepo);
        if (nextEntries.length > 0) {
            addEntriesToPeerContext(peerContext, nextEntries, node, graph.monorepo);
        }
        return undefined;
    },
    /**
     * Try to resolve peer dependencies using already seen target
     * values from the current peer context set.
     *
     * Resolution priority (highest to lowest):
     * 1. Sibling deps from parent (workspace direct deps take priority)
     * 2. Peer-edge closure of sibling targets (handles peer cycles)
     * 3. Global peer context set entries
     * 4. Add to nextDeps for normal resolution (or create dangling edge for optional)
     *
     * Side effects: may add/rewire edges on `graph`/`node` and push onto
     * `nextDeps` for the next BFS level.
     */
    resolvePeerDeps: () => {
        for (const nextDep of nextPeerDeps.values()) {
            // NOTE: shadows the outer `spec`/`type` parameters on purpose —
            // from here on they refer to the peer dep being resolved
            const { spec, type } = nextDep;
            /* c8 ignore next - only peer types reach here by design */
            if (type !== 'peer' && type !== 'peerOptional')
                continue;
            const name = spec.final.name;
            // PRIORITY 1: Sibling deps from parent
            // These take priority because workspace's direct deps should win over
            // versions from other workspaces that may be in the peer context
            const siblingEntry = queuedEntries.find(e => (e.target?.name ?? e.spec.final.name) === name);
            const siblingTarget = siblingEntry?.target ?? fromNode.edgesOut.get(name)?.to;
            if (siblingTarget &&
                nodeSatisfiesSpec(siblingTarget, spec, fromNode, graph)) {
                // Override existing edge if pointing elsewhere (sibling must win)
                const existingEdge = node.edgesOut.get(name);
                if (existingEdge?.to && existingEdge.to !== siblingTarget) {
                    // rewire in place: drop the old reverse link, then repoint
                    existingEdge.to.edgesIn.delete(existingEdge);
                    existingEdge.to = siblingTarget;
                    siblingTarget.edgesIn.add(existingEdge);
                }
                else if (!existingEdge) {
                    graph.addEdge(type, spec, node, siblingTarget);
                }
                continue;
            }
            // PRIORITY 2: Peer-edge closure of sibling targets
            // Handles cycles like A->B(peer)->C(peer)->A(peer)
            const localPeer = findFromPeerClosure(name, spec, queuedEntries, fromNode, graph);
            if (localPeer && !node.edgesOut.has(name)) {
                graph.addEdge(type, spec, node, localPeer);
                continue;
            }
            // PRIORITY 3: Global peer context set
            const entry = peerContext.get(name);
            if (!node.edgesOut.has(name) &&
                entry?.target &&
                nodeSatisfiesSpec(entry.target, spec, fromNode, graph)) {
                graph.addEdge(type, spec, node, entry.target);
                // record this spec as satisfied by the context entry
                entry.specs.add(spec.final);
                continue;
            }
            // PRIORITY 4: Fallback - add to nextDeps or create dangling edge
            if (type === 'peerOptional') {
                // Optional peers that can't be resolved get a dangling edge
                // (graph.addEdge with no target)
                graph.addEdge(type, spec, node);
            }
            else if (siblingEntry &&
                siblingEntry.spec.bareSpec !== spec.bareSpec) {
                // Sibling has a more specific spec - use it for resolution
                nextDeps.push({ ...nextDep, spec: siblingEntry.spec });
            }
            else {
                // Add to next deps for normal resolution in upcoming levels
                nextDeps.push(nextDep);
            }
        }
    },
});
|
|
633
|
+
/**
 * Core peer-context management pass, run once per BFS level of the ideal
 * graph build. For every group of freshly placed child deps it:
 *
 * 1. **Collects fork requirements** — invokes each child's deferred
 *    `putEntries()`; a truthy return means that child's entries conflict
 *    with its current peer context and a fork is required.
 * 2. **Forks or reuses contexts** — walks the conflicting children in
 *    id order, first attempting to reuse the previously forked sibling
 *    context (when its entries don't conflict) to keep the total number
 *    of peer contexts low, otherwise creating a fresh fork.
 * 3. **Resolves peer deps** — with contexts settled, invokes each child's
 *    `resolvePeerDeps()` to wire up satisfiable peers or defer them to the
 *    next level, then re-orders each child's deps deterministically.
 *
 * Every iteration order is keyed on `node.id` so repeated builds produce
 * identical graphs.
 */
export const postPlacementPeerCheck = (graph, sortedLevelResults) => {
    // deterministic ordering used for every phase below
    const byNodeId = (a, b) => a.node.id.localeCompare(b.node.id, 'en');
    for (const levelGroup of sortedLevelResults) {
        const orderedChildren = [...levelGroup].sort(byNodeId);
        // PHASE 1: gather the children whose entries conflict with their
        // current peer context, keyed to the full entry list to fork with
        const forkRequests = new Map();
        for (const child of orderedChildren) {
            const forkEntries = child.updateContext.putEntries();
            if (forkEntries) {
                forkRequests.set(child, forkEntries);
            }
        }
        const orderedForkRequests = [...forkRequests.entries()].sort(([a], [b]) => byNodeId(a, b));
        // PHASE 2: satisfy each fork request, preferring to piggyback on the
        // most recently forked sibling context when compatible
        let reusableContext;
        for (const [child, forkEntries] of orderedForkRequests) {
            const compatible = reusableContext &&
                !checkEntriesToPeerContext(reusableContext, forkEntries);
            if (compatible) {
                // share the sibling's fork instead of creating another context
                addEntriesToPeerContext(reusableContext, forkEntries, child.node, graph.monorepo);
                child.peerContext = reusableContext;
            }
            else {
                // incompatible (or first fork at this level): fork fresh and
                // offer the new context for reuse by the next sibling
                child.peerContext = forkPeerContext(graph, child.peerContext, forkEntries);
                reusableContext = child.peerContext;
            }
        }
        // PHASE 3: contexts are final — resolve peers and normalize dep order
        for (const child of orderedChildren) {
            child.updateContext.resolvePeerDeps();
            child.deps = getOrderedDependencies(child.deps);
        }
    }
};
|