@vltpkg/graph 1.0.0-rc.23 → 1.0.0-rc.24
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/actual/load.d.ts +107 -0
- package/dist/actual/load.js +336 -0
- package/dist/browser.d.ts +14 -0
- package/dist/browser.js +16 -0
- package/dist/build.d.ts +28 -0
- package/dist/build.js +78 -0
- package/dist/dependencies.d.ts +65 -0
- package/dist/dependencies.js +111 -0
- package/dist/diff.d.ts +119 -0
- package/dist/diff.js +151 -0
- package/dist/edge.d.ts +46 -0
- package/dist/edge.js +77 -0
- package/dist/fixup-added-names.d.ts +18 -0
- package/dist/fixup-added-names.js +46 -0
- package/dist/graph.d.ts +153 -0
- package/dist/graph.js +444 -0
- package/dist/ideal/append-nodes.d.ts +31 -0
- package/dist/ideal/append-nodes.js +560 -0
- package/dist/ideal/build-ideal-from-starting-graph.d.ts +14 -0
- package/dist/ideal/build-ideal-from-starting-graph.js +69 -0
- package/dist/ideal/build.d.ts +40 -0
- package/dist/ideal/build.js +84 -0
- package/dist/ideal/get-importer-specs.d.ts +20 -0
- package/dist/ideal/get-importer-specs.js +180 -0
- package/dist/ideal/peers.d.ts +160 -0
- package/dist/ideal/peers.js +696 -0
- package/dist/ideal/refresh-ideal-graph.d.ts +43 -0
- package/dist/ideal/refresh-ideal-graph.js +62 -0
- package/dist/ideal/remove-satisfied-specs.d.ts +7 -0
- package/dist/ideal/remove-satisfied-specs.js +34 -0
- package/dist/ideal/sorting.d.ts +45 -0
- package/dist/ideal/sorting.js +70 -0
- package/dist/ideal/types.d.ts +107 -0
- package/dist/ideal/types.js +1 -0
- package/dist/index.d.ts +38 -0
- package/dist/index.js +32 -0
- package/dist/install.d.ts +19 -0
- package/dist/install.js +208 -0
- package/dist/lockfile/load-edges.d.ts +11 -0
- package/dist/lockfile/load-edges.js +105 -0
- package/dist/lockfile/load-nodes.d.ts +4 -0
- package/dist/lockfile/load-nodes.js +101 -0
- package/dist/lockfile/load.d.ts +45 -0
- package/dist/lockfile/load.js +84 -0
- package/dist/lockfile/save.d.ts +30 -0
- package/dist/lockfile/save.js +174 -0
- package/dist/lockfile/types.d.ts +95 -0
- package/dist/lockfile/types.js +49 -0
- package/dist/modifiers.d.ts +188 -0
- package/dist/modifiers.js +329 -0
- package/dist/node.d.ts +234 -0
- package/dist/node.js +388 -0
- package/dist/non-empty-list.d.ts +2 -0
- package/dist/non-empty-list.js +2 -0
- package/dist/reify/add-edge.d.ts +9 -0
- package/dist/reify/add-edge.js +71 -0
- package/dist/reify/add-edges.d.ts +4 -0
- package/dist/reify/add-edges.js +12 -0
- package/dist/reify/add-nodes.d.ts +6 -0
- package/dist/reify/add-nodes.js +16 -0
- package/dist/reify/bin-chmod.d.ts +10 -0
- package/dist/reify/bin-chmod.js +38 -0
- package/dist/reify/build.d.ts +13 -0
- package/dist/reify/build.js +111 -0
- package/dist/reify/calculate-save-value.d.ts +2 -0
- package/dist/reify/calculate-save-value.js +50 -0
- package/dist/reify/check-needed-build.d.ts +34 -0
- package/dist/reify/check-needed-build.js +71 -0
- package/dist/reify/delete-edge.d.ts +4 -0
- package/dist/reify/delete-edge.js +27 -0
- package/dist/reify/delete-edges.d.ts +4 -0
- package/dist/reify/delete-edges.js +13 -0
- package/dist/reify/delete-nodes.d.ts +4 -0
- package/dist/reify/delete-nodes.js +15 -0
- package/dist/reify/extract-node.d.ts +23 -0
- package/dist/reify/extract-node.js +83 -0
- package/dist/reify/index.d.ts +34 -0
- package/dist/reify/index.js +161 -0
- package/dist/reify/internal-hoist.d.ts +8 -0
- package/dist/reify/internal-hoist.js +133 -0
- package/dist/reify/optional-fail.d.ts +15 -0
- package/dist/reify/optional-fail.js +15 -0
- package/dist/reify/rollback.d.ts +4 -0
- package/dist/reify/rollback.js +23 -0
- package/dist/reify/update-importers-package-json.d.ts +35 -0
- package/dist/reify/update-importers-package-json.js +122 -0
- package/dist/remove-optional-subgraph.d.ts +33 -0
- package/dist/remove-optional-subgraph.js +47 -0
- package/dist/resolve-save-type.d.ts +5 -0
- package/dist/resolve-save-type.js +4 -0
- package/dist/stringify-node.d.ts +2 -0
- package/dist/stringify-node.js +32 -0
- package/dist/transfer-data/load.d.ts +43 -0
- package/dist/transfer-data/load.js +175 -0
- package/dist/uninstall.d.ts +14 -0
- package/dist/uninstall.js +75 -0
- package/dist/update.d.ts +12 -0
- package/dist/update.js +73 -0
- package/dist/virtual-root.d.ts +15 -0
- package/dist/virtual-root.js +78 -0
- package/dist/visualization/human-readable-output.d.ts +26 -0
- package/dist/visualization/human-readable-output.js +163 -0
- package/dist/visualization/json-output.d.ts +41 -0
- package/dist/visualization/json-output.js +50 -0
- package/dist/visualization/mermaid-output.d.ts +17 -0
- package/dist/visualization/mermaid-output.js +170 -0
- package/dist/visualization/object-like-output.d.ts +2 -0
- package/dist/visualization/object-like-output.js +47 -0
- package/package.json +22 -22
|
@@ -0,0 +1,560 @@
|
|
|
1
|
+
import { joinDepIDTuple, joinExtra } from '@vltpkg/dep-id';
|
|
2
|
+
import { error } from '@vltpkg/error-cause';
|
|
3
|
+
import { Spec } from '@vltpkg/spec';
|
|
4
|
+
import { satisfies } from '@vltpkg/satisfies';
|
|
5
|
+
import { longDependencyTypes, normalizeManifest } from '@vltpkg/types';
|
|
6
|
+
import { fixupAddedNames } from "../fixup-added-names.js";
|
|
7
|
+
import { shorten } from "../dependencies.js";
|
|
8
|
+
import { removeOptionalSubgraph } from "../remove-optional-subgraph.js";
|
|
9
|
+
import { extractNode } from "../reify/extract-node.js";
|
|
10
|
+
import { checkPeerEdgesCompatible, endPeerPlacement, forkPeerContext, postPlacementPeerCheck, startPeerPlacement, } from "./peers.js";
|
|
11
|
+
import { compareByHasPeerDeps } from "./sorting.js";
|
|
12
|
+
/**
 * Decide whether a dependency of the given type should be installed for
 * this node. `devDependencies` are installed only for importers and git
 * dependencies; every other dependency type is always installed.
 */
const shouldInstallDepType = (node, depType) => {
    if (depType !== 'devDependencies')
        return true;
    return node.importer || node.id.startsWith('git');
};
|
|
19
|
+
/**
 * Retrieve the {@link DepID} and location for a `file:` type {@link Node}.
 * Returns `undefined` for any spec that is not a `file:` type.
 */
const getFileTypeInfo = (spec, fromNode, scurry) => {
    const finalSpec = spec.final;
    if (finalSpec.type !== 'file')
        return;
    /* c8 ignore start - should be impossible */
    if (!finalSpec.file) {
        throw error('no path on file specifier', { spec });
    }
    /* c8 ignore stop */
    // Both linked folders and local tarballs (both defined with the
    // `file:` spec prefix) must be located relative to their parent node,
    // so resolve the expected path once and reuse it for both the
    // location and the id.
    const resolved = scurry.cwd
        .resolve(fromNode.location)
        .resolve(finalSpec.file);
    const relPath = resolved.relativePosix();
    return {
        path: relPath,
        id: joinDepIDTuple(['file', relPath]),
        isDirectory: !!resolved.lstatSync()?.isDirectory(),
    };
};
|
|
43
|
+
// True only when `a` is an array whose every element is a string
// (an empty array qualifies).
const isStringArray = (a) => Array.isArray(a) && a.every(b => typeof b === 'string');
|
|
44
|
+
/**
 * Try to find a compatible resolution for a dependency, checking peer context.
 * If the first resolution candidate is incompatible with the peer context,
 * try other candidates.
 *
 * @param spec - dependency spec being resolved; its `.final` form is what
 *   candidates are matched against
 * @param fromNode - the node that depends on `spec`
 * @param graph - graph under construction; consulted via `findResolution`
 *   and the `nodesByName` candidate index
 * @param peerContext - current peer context passed to
 *   `checkPeerEdgesCompatible`
 * @param queryModifier - optional modifier query forwarded to
 *   `graph.findResolution`
 * @param _peer - unused in this function; the underscore marks it as
 *   intentionally ignored
 * @returns `{ existingNode, peerCompatResult }`; `existingNode` is
 *   `undefined` when no reusable node was found
 */
const findCompatibleResolution = (spec, fromNode, graph, peerContext, queryModifier, _peer) => {
    // Hoist invariants once so the memoized helper below closes over
    // stable values rather than re-reading them per candidate
    const fromLoc = fromNode.location;
    const projectRoot = graph.projectRoot;
    const monorepo = graph.monorepo;
    const final = spec.final;
    // Memoize satisfies() results per-node within this resolution attempt
    const satisfiesCache = new Map();
    const satisfiesFinal = (n) => {
        const key = n.id;
        const cached = satisfiesCache.get(key);
        /* c8 ignore next 3 - optimization: cache hit when same node checked multiple times */
        if (cached !== undefined) {
            return cached;
        }
        const result = satisfies(key, final, fromLoc, projectRoot, monorepo);
        satisfiesCache.set(key, result);
        return result;
    };
    // Prefer existing edge target if it satisfies the spec.
    // This ensures lockfile resolutions are preserved when still valid,
    // rather than potentially picking a different satisfying version.
    const existingEdge = fromNode.edgesOut.get(spec.name);
    let existingNode;
    if (existingEdge?.to &&
        !existingEdge.to.detached &&
        satisfiesFinal(existingEdge.to)) {
        existingNode = existingEdge.to;
    }
    else {
        existingNode = graph.findResolution(spec, fromNode, queryModifier);
    }
    // When nothing was found there is nothing to be peer-incompatible
    // with, so the result defaults to compatible
    let peerCompatResult = existingNode ?
        checkPeerEdgesCompatible(existingNode, fromNode, peerContext, graph)
        : { compatible: true };
    // CANDIDATE FALLBACK: If first candidate is peer-incompatible, try others
    // Lazy-load candidates only when fallback needed
    if (existingNode && !peerCompatResult.compatible) {
        const candidates = graph.nodesByName.get(final.name);
        if (candidates && candidates.size > 1) {
            for (const candidate of candidates) {
                // skip the node we already rejected, detached nodes, and
                // any candidate that does not satisfy the spec
                if (candidate === existingNode)
                    continue;
                if (candidate.detached)
                    continue;
                if (!satisfiesFinal(candidate))
                    continue;
                const compat = checkPeerEdgesCompatible(candidate, fromNode, peerContext, graph);
                if (compat.compatible) {
                    // first compatible candidate wins
                    existingNode = candidate;
                    peerCompatResult = compat;
                    break;
                }
            }
        }
    }
    return { existingNode, peerCompatResult };
};
|
|
107
|
+
/**
 * Fetch manifests for dependencies and create placement tasks.
 *
 * This is Phase 1 of the breadth-first graph building process. For each
 * dependency at the current level:
 * 1. Apply any active modifiers (spec swapping)
 * 2. Try to find an existing node to reuse (with peer compatibility check)
 * 3. If no reusable node, start a manifest fetch (in parallel)
 * 4. Create placement tasks for Phase 2
 *
 * The result is sorted to process non-peer-dependent packages first,
 * ensuring peer dependencies can resolve to already-placed siblings.
 *
 * **Read-only**: This function no longer mutates the graph. It returns
 * tasks that will be applied serially in the BFS loop for deterministic ordering.
 *
 * @param packageInfo - used to fetch manifests over the network
 * @param graph - graph under construction (read-only here)
 * @param fromNode - node whose dependencies are being processed
 * @param deps - list of `{ spec, type }` dependency entries to resolve
 * @param scurry - path resolver used for `file:` specs and fetch origins
 * @param peerContext - peer context inherited from the caller
 * @param modifierRefs - optional per-name modifier references
 * @param depth - current BFS depth, recorded on each fetch task
 * @returns `{ placementTasks, reuseTasks, forkRequests }`
 */
const fetchManifestsForDeps = async (packageInfo, graph, fromNode, deps, scurry, peerContext, modifierRefs, depth = 0) => {
    const fetchTasks = [];
    const placementTasks = [];
    const reuseTasks = [];
    const forkRequests = [];
    for (const { spec: originalSpec, type } of deps) {
        let spec = originalSpec;
        const fileTypeInfo = getFileTypeInfo(spec, fromNode, scurry);
        const activeModifier = modifierRefs?.get(spec.name);
        // MODIFIER HANDLING: Swap spec if an edge modifier is fully matched
        // Example: `vlt install --override "react:^19"` changes react's spec
        const queryModifier = activeModifier?.modifier.query;
        const completeModifier = activeModifier &&
            activeModifier.interactiveBreadcrumb.current ===
                activeModifier.modifier.breadcrumb.last;
        if (queryModifier &&
            completeModifier &&
            'spec' in activeModifier.modifier) {
            spec = activeModifier.modifier.spec;
            // bareSpec of '-' means "remove this dependency"
            if (spec.bareSpec === '-') {
                continue;
            }
        }
        const peer = type === 'peer' || type === 'peerOptional';
        // NODE REUSE LOGIC with peer compatibility
        const { existingNode, peerCompatResult } = findCompatibleResolution(spec, fromNode, graph, peerContext, queryModifier, peer);
        // Accumulate fork request if incompatible peer edges detected (defer actual fork)
        const effectivePeerContext = peerContext;
        /* c8 ignore start */
        if (!peerCompatResult.compatible && peerCompatResult.forkEntry) {
            forkRequests.push(peerCompatResult.forkEntry);
            // All fork entries from this fromNode will be applied together in Phase B
        }
        /* c8 ignore stop */
        // defines what nodes are eligible to be reused
        // NOTE(review): `(!peer || peerCompatResult.compatible)` is
        // subsumed by the trailing `peerCompatResult.compatible` — the
        // first conjunct is redundant; kept as-is here, behavior unchanged
        const validExistingNode = existingNode &&
            !existingNode.detached &&
            // Regular deps can always reuse.
            // Peer deps can reuse as well if their peer edges are compatible.
            /* c8 ignore start */
            (!peer || peerCompatResult.compatible) &&
            // Check if existing node's peer edges are compatible with new parent
            peerCompatResult.compatible;
        /* c8 ignore stop */
        if (validExistingNode ||
            // importers are handled at the ./refresh-ideal-graph.ts top-level
            // so we should just skip whenever we find one
            existingNode?.importer) {
            // Defer edge creation to Phase B for deterministic ordering.
            // Previously added immediately, but this caused race conditions when
            // parallel fetches completed in different orders.
            reuseTasks.push({ type, spec, fromNode, toNode: existingNode });
            continue;
        }
        // is the current edge pointing to an optional dependency?
        const edgeOptional = type === 'optional' || type === 'peerOptional';
        // Start manifest fetch immediately for parallel processing
        const manifestPromise =
            // the "detached" node state means that it has already been loaded as
            // part of a graph (either lockfile or actual) and it has valid manifest
            // data so we shortcut the package info manifest fetch here
            existingNode?.detached && existingNode.manifest ?
                Promise.resolve(existingNode.manifest)
                // this is the entry point to fetch calls to retrieve manifests
                // from the build ideal graph point of view
                : packageInfo
                    .manifest(spec, { from: scurry.resolve(fromNode.location) })
                    .then(manifest => manifest)
                    .catch((er) => {
                        // optional deps ignored if inaccessible; resolving to
                        // undefined lets Phase 2 skip them without pruning
                        if (edgeOptional || fromNode.optional) {
                            return undefined;
                        }
                        throw er;
                    });
        const fetchTask = {
            spec,
            type,
            fromNode,
            fileTypeInfo,
            activeModifier,
            queryModifier,
            edgeOptional,
            manifestPromise,
            depth,
            peerContext: effectivePeerContext,
        };
        fetchTasks.push(fetchTask);
    }
    // Create placement tasks from fetch tasks
    // (awaiting in declaration order; fetches already run concurrently)
    for (const fetchTask of fetchTasks) {
        const manifest = await fetchTask.manifestPromise;
        placementTasks.push({
            fetchTask,
            manifest,
        });
    }
    // sort placement tasks: non-peer dependencies first, then peer dependencies
    // so that peer dependencies can easily reuse already placed regular
    // dependencies as part of peer context set resolution also makes sure to
    // sort by the manifest name for deterministic order.
    placementTasks.sort(compareByHasPeerDeps);
    return { placementTasks, reuseTasks, forkRequests };
};
|
|
228
|
+
/**
 * Process placement tasks and collect child dependencies.
 *
 * This is Phase 2 of the breadth-first graph building process. For each
 * resolved manifest:
 * 1. Handle missing manifests (optional vs required deps)
 * 2. Start peer placement process (collect sibling context)
 * 3. Place the node in the graph with appropriate flags
 * 4. Trigger early extraction if eligible (performance optimization)
 * 5. Collect child dependencies for the next BFS level
 * 6. End peer placement (setup context update functions)
 *
 * Early extraction: When `actual` graph is provided, nodes destined for the
 * vlt store are extracted immediately (in parallel) instead of waiting for
 * the full ideal graph to be built. This significantly improves install time.
 *
 * @param graph - graph under construction (mutated here)
 * @param options - spec / manifest options passed through to helpers
 * @param placementTasks - tasks produced by `fetchManifestsForDeps`,
 *   already sorted by `compareByHasPeerDeps`
 * @param add - importer-level map of added dependencies (name fixups)
 * @param modifiers - optional modifiers tracker
 * @param extractPromises - accumulator for in-flight extraction promises
 * @param actual - actual graph, enables early extraction when present
 * @param seenExtracted - set of node ids already queued for extraction
 * @param remover - rollback helper required for early extraction
 * @param transientAdd - per-node map of injected transient dependencies
 * @param transientRemove - per-node set of dependency names to drop
 * @returns list of `{ node, deps, modifierRefs, peerContext, updateContext }`
 *   entries for the next BFS level
 */
const processPlacementTasks = async (graph, options, placementTasks, add, modifiers, scurry, packageInfo, extractPromises, actual, seenExtracted, remover, transientAdd, transientRemove) => {
    const childDepsToProcess = [];
    // Note: placementTasks are already sorted by fetchManifestsForDeps
    // using compareByHasPeerDeps to ensure non-peer deps are processed first.
    // We don't sort again here to preserve that ordering.
    for (const placementTask of placementTasks) {
        const { fetchTask, manifest } = placementTask;
        let { activeModifier, edgeOptional, fileTypeInfo, fromNode, peerContext, queryModifier, spec, type, } = fetchTask;
        // fix the name in the `add` map when needed. This allows the upcoming
        // reify step to properly update the package.json file dependencies
        // using the correct names retrieved from the manifest data
        const additiveMap = fromNode.importer ? add : transientAdd?.get(fromNode.id);
        spec = fixupAddedNames(additiveMap, manifest, options, spec);
        // handles missing manifest resolution
        if (!manifest) {
            if (!edgeOptional && fromNode.isOptional()) {
                // failed resolution of a non-optional dep of an optional node
                // have to clean up the dependents
                removeOptionalSubgraph(graph, fromNode);
                continue;
            }
            else if (edgeOptional) {
                // failed resolution of an optional dep, just ignore it,
                // nothing to prune because we never added it in the first place.
                continue;
            }
            else {
                throw error('failed to resolve dependency', {
                    spec,
                    from: fromNode.location,
                });
            }
        }
        // start peer deps placement process, populating the peer context with
        // dependency data; adding the parent node deps and this manifest's
        // peer deps references to the current peer context set
        const peerPlacement = startPeerPlacement(peerContext, manifest, fromNode, options);
        const peerSetHash = peerPlacement.peerSetHash;
        const queuedEntries = peerPlacement.queuedEntries;
        // places a new node in the graph representing a newly seen dependency
        const node = graph.placePackage(fromNode, type, spec, normalizeManifest(manifest), fileTypeInfo?.id, joinExtra({ peerSetHash, modifier: queryModifier }));
        /* c8 ignore start - not possible, already ensured manifest */
        if (!node) {
            throw error('failed to place package', {
                from: fromNode.location,
                spec,
            });
        }
        /* c8 ignore stop */
        // update the node modifier tracker
        if (activeModifier) {
            modifiers?.updateActiveEntry(node, activeModifier);
        }
        // Early extraction applies only to non-peer deps destined for the
        // vlt store; all of the optional collaborators (remover, actual,
        // extractPromises, ...) must be present for it to be attempted
        const eligibleForExtraction = type !== 'peer' &&
            type !== 'peerOptional' &&
            remover &&
            extractPromises &&
            actual &&
            scurry &&
            packageInfo &&
            node.inVltStore() &&
            !node.isOptional() &&
            // this fixes an issue with installing `file:pathname` specs
            /* c8 ignore next */ !fileTypeInfo?.isDirectory &&
            !node.importer;
        // extract the node if it meets the criteria for early extraction
        if (eligibleForExtraction) {
            /* c8 ignore start */
            if (seenExtracted?.has(node.id)) {
                // NOTE(review): this `continue` also skips resolved/location
                // updates and child-dep collection for the re-seen node —
                // presumably intentional since the node was already fully
                // processed on first sight; confirm if this branch is hit
                continue;
            }
            /* c8 ignore stop */
            seenExtracted?.add(node.id);
            const actualNode = actual.nodes.get(node.id);
            if (!actualNode?.equals(node)) {
                // extract the node without awaiting - push the promise to the array
                const extractPromise = extractNode(node, scurry, remover, options, packageInfo);
                extractPromises.push(extractPromise);
            }
        }
        // updates graph node information
        if (fileTypeInfo?.path && fileTypeInfo.isDirectory) {
            node.location = fileTypeInfo.path;
        }
        // Do not clobber lockfile-provided resolved values.
        // `setResolved()` cannot infer a tarball without a manifest,
        // which can cause `resolved` to become undefined and the
        // main lockfile to mutate across installs.
        if (!node.resolved) {
            node.setResolved();
        }
        // collect child dependencies for processing in the next level
        const nextPeerDeps = new Map();
        // compute deps normally
        // bundled deps are skipped below, but only for non-git,
        // non-importer nodes with a well-formed bundleDependencies array
        const bundleDeps = manifest.bundleDependencies;
        const bundled = new Set((node.id.startsWith('git') ||
            node.importer ||
            !isStringArray(bundleDeps)) ?
            []
            : bundleDeps);
        // setup next level to process all child dependencies in the manifest
        const nextDeps = [];
        // traverse actual dependency declarations in the manifest
        // creating dependency entries for them
        for (const depTypeName of longDependencyTypes) {
            const depRecord = manifest[depTypeName];
            if (depRecord && shouldInstallDepType(node, depTypeName)) {
                // Sort Object.entries for deterministic iteration
                const sortedEntries = Object.entries(depRecord).sort(([a], [b]) => a.localeCompare(b, 'en'));
                for (const [name, bareSpec] of sortedEntries) {
                    // might need to skip already placed peer deps here
                    if (bundled.has(name))
                        continue;
                    const dep = {
                        type: shorten(depTypeName, name, manifest),
                        spec: Spec.parse(name, bareSpec, {
                            ...options,
                            registry: spec.registry,
                        }),
                    };
                    if (depTypeName === 'peerDependencies') {
                        nextPeerDeps.set(name, dep);
                    }
                    else {
                        nextDeps.push(dep);
                    }
                }
            }
        }
        // Inject transient dependencies for non-importer nodes (nested folders)
        // These are deps that were added from a nested folder context using
        // relative file: specs that should resolve relative to that folder
        const transientDeps = transientAdd?.get(node.id);
        if (transientDeps) {
            for (const [, dep] of transientDeps) {
                if (dep.type === 'peer' || dep.type === 'peerOptional') {
                    nextPeerDeps.set(dep.spec.name, dep);
                    continue;
                }
                // remove the dependency from nextDeps if it already exists
                // so the transient entry replaces the manifest-declared one
                const index = nextDeps.findIndex(d => d.spec.name === dep.spec.name);
                if (index !== -1) {
                    nextDeps.splice(index, 1);
                }
                nextDeps.push(dep);
            }
        }
        // Remove transient removals when needed
        const transientRemovals = transientRemove?.get(node.id);
        if (transientRemovals) {
            for (const depName of transientRemovals) {
                const index = nextDeps.findIndex(dep => dep.spec.name === depName);
                if (index !== -1) {
                    nextDeps.splice(index, 1);
                    continue;
                }
                if (nextPeerDeps.has(depName)) {
                    nextPeerDeps.delete(depName);
                }
            }
        }
        // finish peer placement for this node, resolving satisfied peers
        // to seen nodes from the peer context and adding unsatisfied peers
        // to `nextDeps` so they get processed along regular dependencies
        const updateContext = endPeerPlacement(peerContext, nextDeps, nextPeerDeps, graph, spec, fromNode, node, type, queuedEntries);
        childDepsToProcess.push({
            node,
            deps: nextDeps,
            modifierRefs: modifiers?.tryDependencies(node, nextDeps),
            peerContext,
            updateContext,
        });
    }
    return childDepsToProcess;
};
|
|
419
|
+
/**
 * Append new nodes in the given `graph` for dependencies specified at `add`
 * and missing dependencies from the `deps` parameter.
 *
 * Uses **breadth-first traversal** (BFS) with **deterministic ordering** to
 * ensure reproducible builds. The algorithm:
 *
 * 1. Process all deps at the current level in parallel
 * 2. After each level, run `postPlacementPeerCheck` to handle peer contexts
 * 3. Collect child deps for the next level
 * 4. Repeat until no more deps to process
 *
 * **Peer Context Isolation**: Each workspace importer gets its own peer context
 * to prevent cross-workspace leakage. Without this, `react@^18` from workspace A
 * could incorrectly satisfy `react@^19` peer deps in workspace B.
 *
 * **Early Extraction**: When `actual` graph is provided, nodes are extracted
 * to the vlt store during graph construction (not after), improving performance.
 *
 * @param packageInfo - manifest fetcher handed down to Phase A
 * @param graph - graph under construction (mutated in Phases B and C)
 * @param fromNode - root node whose `deps` seed the traversal
 * @param deps - initial dependency entries for `fromNode`
 * @param seen - set of node ids already traversed (cycle prevention;
 *   shared across calls)
 * @param add / modifiers / modifierRefs - installation additions and
 *   modifier state forwarded to the phase helpers
 * @param extractPromises / actual / seenExtracted / remover - early
 *   extraction collaborators (all optional)
 * @param transientAdd / transientRemove - per-node transient dep changes
 */
export const appendNodes = async (packageInfo, graph, fromNode, deps, scurry, options, seen, add, modifiers, modifierRefs, extractPromises, actual, seenExtracted, remover, transientAdd, transientRemove) => {
    // Cycle detection: skip if already processed
    /* c8 ignore next */
    if (seen.has(fromNode.id))
        return;
    seen.add(fromNode.id);
    // PEER CONTEXT ISOLATION: Each workspace importer needs its own context
    // to prevent peer targets from one workspace affecting another.
    // The main importer (index 0) uses the initial context; others get fresh ones.
    let initialPeerContext = graph.peerContexts[0];
    /* c8 ignore start - impossible */
    if (!initialPeerContext)
        throw error('no initial peer context found in graph');
    /* c8 ignore stop */
    if (fromNode.importer && fromNode !== graph.mainImporter) {
        // Create isolated peer context for this workspace importer
        const nextPeerContext = new Map();
        nextPeerContext.index = graph.nextPeerContextIndex();
        graph.peerContexts[nextPeerContext.index] = nextPeerContext;
        initialPeerContext = nextPeerContext;
    }
    // BFS queue: process deps level by level for deterministic builds
    let currentLevelDeps = [
        {
            node: fromNode,
            deps,
            modifierRefs,
            depth: 0,
            peerContext: initialPeerContext,
            // no-op update context for the root entry; real ones are
            // produced by endPeerPlacement for placed child nodes
            /* c8 ignore start */
            updateContext: {
                putEntries: () => undefined,
                resolvePeerDeps: () => { },
            },
            /* c8 ignore stop */
        },
    ];
    // BFS MAIN LOOP: Process level by level until no more deps
    while (currentLevelDeps.length > 0) {
        const nextLevelDeps = [];
        // ============================================================
        // PHASE A: PARALLEL FETCH (READ-ONLY)
        // ============================================================
        // Fetch all manifests at this level in parallel without mutating the graph.
        // This phase is read-only to avoid race conditions from network timing.
        const fetchResults = await Promise.all(currentLevelDeps.map(async ({ node, deps: nodeDeps, modifierRefs: nodeModifierRefs, peerContext, depth, }) => {
            // Cycle prevention: mark as seen when starting to process
            seen.add(node.id);
            // Fetch manifests and collect tasks (no graph mutations)
            const result = await fetchManifestsForDeps(packageInfo, graph, node,
            // Sort by name for deterministic ordering (reproducible builds)
            nodeDeps.sort((a, b) => a.spec.name.localeCompare(b.spec.name, 'en')), scurry, peerContext, nodeModifierRefs, depth);
            return {
                entry: {
                    node,
                    deps: nodeDeps,
                    modifierRefs: nodeModifierRefs,
                    peerContext,
                    depth,
                },
                result,
            };
        }));
        // ============================================================
        // PHASE B: SERIAL MUTATIONS (DETERMINISTIC ORDER)
        // ============================================================
        // Sort results by stable identifiers to ensure deterministic ordering
        // regardless of which manifest fetch completed first
        const sortedResults = fetchResults.sort((a, b) => {
            // Sort by node ID (DepID-based, stable) and depth
            const keyA = `${a.entry.node.id}::${a.entry.depth}`;
            const keyB = `${b.entry.node.id}::${b.entry.depth}`;
            return keyA.localeCompare(keyB, 'en');
        });
        // Apply all mutations serially in deterministic order
        const levelResults = [];
        for (const { entry, result } of sortedResults) {
            // Apply accumulated fork requests if any (from Phase A deferred forks)
            if (result.forkRequests.length > 0) {
                const forkedContext = forkPeerContext(graph, entry.peerContext, result.forkRequests);
                entry.peerContext = forkedContext;
                // Update peer context in all placement tasks to use forked context
                for (const task of result.placementTasks) {
                    task.fetchTask.peerContext = forkedContext;
                }
            }
            // Apply reuse edges in deterministic order (before placement)
            // Sort reuse tasks by spec name for stability
            const sortedReuseTasks = result.reuseTasks.sort((a, b) => a.spec.name.localeCompare(b.spec.name, 'en'));
            for (const { type, spec, fromNode, toNode, } of sortedReuseTasks) {
                graph.addEdge(type, spec, fromNode, toNode);
            }
            // Place nodes and collect child deps
            const placed = await processPlacementTasks(graph, options, result.placementTasks, add, modifiers, scurry, packageInfo, extractPromises, actual, seenExtracted, remover, transientAdd, transientRemove);
            levelResults.push(placed);
        }
        // ============================================================
        // PHASE C: POST-PLACEMENT PEER CHECK
        // ============================================================
        // After all nodes at this level are placed, update peer contexts,
        // fork as needed, and resolve peer deps that can be satisfied.
        // This must happen AFTER placement so sibling nodes are available.
        postPlacementPeerCheck(graph, levelResults);
        // ============================================================
        // STEP 3: COLLECT CHILD DEPS FOR NEXT LEVEL
        // ============================================================
        for (const childDepsToProcess of levelResults) {
            for (const childDep of childDepsToProcess) {
                // Skip already-seen nodes (cycle prevention)
                if (!seen.has(childDep.node.id)) {
                    /* c8 ignore next */
                    const currentDepth = currentLevelDeps[0]?.depth ?? 0;
                    nextLevelDeps.push({
                        ...childDep,
                        depth: currentDepth + 1,
                    });
                }
            }
        }
        // Advance to next BFS level
        currentLevelDeps = nextLevelDeps;
    }
};
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
import type { PackageJson } from '@vltpkg/package-json';
import type { RefreshIdealGraphOptions } from './refresh-ideal-graph.ts';
import type { Graph } from '../graph.ts';
/**
 * Options for {@link buildIdealFromStartingGraph}: everything accepted by
 * `refreshIdealGraph`, plus a required `packageJson` instance.
 */
export type BuildIdealFromStartingGraphOptions = RefreshIdealGraphOptions & {
    packageJson: PackageJson;
};
/**
 * Builds an ideal {@link Graph} representing the dependencies that
 * should be present in order to fulfill the requirements defined
 * by the `package.json` and `vlt-lock.json` files using the `graph` set
 * in options as a starting point. Also add / remove any dependencies
 * listed in the `add` and `remove` properties.
 */
export declare const buildIdealFromStartingGraph: (options: BuildIdealFromStartingGraphOptions) => Promise<Graph>;
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
import { getImporterSpecs } from "./get-importer-specs.js";
|
|
2
|
+
import { refreshIdealGraph } from "./refresh-ideal-graph.js";
|
|
3
|
+
import { resolveSaveType } from "../resolve-save-type.js";
|
|
4
|
+
/**
 * Builds an ideal {@link Graph} representing the dependencies that
 * should be present in order to fulfill the requirements defined
 * by the `package.json` and `vlt-lock.json` files, starting from the
 * `graph` provided in options. Dependencies listed in the `add` and
 * `remove` options are merged in before the graph is refreshed.
 */
export const buildIdealFromStartingGraph = async (options) => {
    const { add, remove } = options;
    // Compute the per-importer dependency specs by merging what is already
    // present in the starting graph with user-supplied values; specs that
    // the starting graph already satisfies are pruned from this result.
    const specs = getImporterSpecs(options);
    // Carry the "dependencies were modified" flags over from the computed
    // specs onto the user-facing add / remove collections.
    add.modifiedDependencies =
        add.modifiedDependencies || specs.add.modifiedDependencies;
    remove.modifiedDependencies =
        remove.modifiedDependencies || specs.remove.modifiedDependencies;
    // Fold manifest-derived additions into `options.add`; entries the user
    // provided explicitly always take precedence over manifest-derived ones.
    for (const [importerId, manifestDeps] of specs.add) {
        const importer = options.graph.nodes.get(importerId);
        /* c8 ignore next - impossible */
        if (!importer)
            continue;
        if (!add.has(importerId)) {
            // Nothing user-provided for this importer: adopt the whole set.
            add.set(importerId, manifestDeps);
            continue;
        }
        const userDeps = add.get(importerId);
        for (const [name, dep] of manifestDeps) {
            // Only fill in deps the user did not already specify.
            if (!userDeps?.has(name)) {
                userDeps?.set(name, dep);
            }
            // Resolve implicit save types against the importer node.
            dep.type = resolveSaveType(importer, name, dep.type);
        }
    }
    // Fold manifest-derived removals into `options.remove` the same way.
    for (const [importerId, names] of specs.remove) {
        if (!remove.has(importerId)) {
            remove.set(importerId, names);
            continue;
        }
        const userRemovals = remove.get(importerId);
        for (const name of names) {
            userRemovals?.add(name);
        }
    }
    // Refresh the graph: place nodes marked for addition, drop the ones
    // marked for removal, and recalculate peer deps / default locations.
    await refreshIdealGraph({
        ...options,
        transientAdd: specs.transientAdd,
        transientRemove: specs.transientRemove,
    });
    // Collect anything left unreachable after the refresh.
    options.graph.gc();
    return options.graph;
};