gitnexus 1.6.3-rc.30 → 1.6.3-rc.32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. package/dist/cli/clean.js +19 -1
  2. package/dist/cli/index.js +6 -0
  3. package/dist/cli/remove.d.ts +30 -0
  4. package/dist/cli/remove.js +99 -0
  5. package/dist/core/graph/graph.js +115 -20
  6. package/dist/core/graph/types.d.ts +12 -1
  7. package/dist/core/group/config-parser.d.ts +4 -0
  8. package/dist/core/group/config-parser.js +18 -1
  9. package/dist/core/group/cross-impact.js +11 -24
  10. package/dist/core/group/service.js +45 -10
  11. package/dist/core/ingestion/ast-cache.d.ts +16 -1
  12. package/dist/core/ingestion/ast-cache.js +14 -2
  13. package/dist/core/ingestion/call-processor.js +9 -0
  14. package/dist/core/ingestion/import-processor.js +4 -0
  15. package/dist/core/ingestion/import-resolvers/python.js +9 -6
  16. package/dist/core/ingestion/language-provider.d.ts +12 -25
  17. package/dist/core/ingestion/languages/python/arity-metadata.d.ts +24 -0
  18. package/dist/core/ingestion/languages/python/arity-metadata.js +45 -0
  19. package/dist/core/ingestion/languages/python/arity.d.ts +22 -0
  20. package/dist/core/ingestion/languages/python/arity.js +38 -0
  21. package/dist/core/ingestion/languages/python/cache-stats.d.ts +17 -0
  22. package/dist/core/ingestion/languages/python/cache-stats.js +28 -0
  23. package/dist/core/ingestion/languages/python/captures.d.ts +19 -0
  24. package/dist/core/ingestion/languages/python/captures.js +106 -0
  25. package/dist/core/ingestion/languages/python/import-decomposer.d.ts +15 -0
  26. package/dist/core/ingestion/languages/python/import-decomposer.js +112 -0
  27. package/dist/core/ingestion/languages/python/import-target.d.ts +17 -0
  28. package/dist/core/ingestion/languages/python/import-target.js +95 -0
  29. package/dist/core/ingestion/languages/python/index.d.ts +80 -0
  30. package/dist/core/ingestion/languages/python/index.js +80 -0
  31. package/dist/core/ingestion/languages/python/interpret.d.ts +15 -0
  32. package/dist/core/ingestion/languages/python/interpret.js +191 -0
  33. package/dist/core/ingestion/languages/python/merge-bindings.d.ts +16 -0
  34. package/dist/core/ingestion/languages/python/merge-bindings.js +44 -0
  35. package/dist/core/ingestion/languages/python/query.d.ts +14 -0
  36. package/dist/core/ingestion/languages/python/query.js +272 -0
  37. package/dist/core/ingestion/languages/python/receiver-binding.d.ts +21 -0
  38. package/dist/core/ingestion/languages/python/receiver-binding.js +116 -0
  39. package/dist/core/ingestion/languages/python/scope-resolver.d.ts +16 -0
  40. package/dist/core/ingestion/languages/python/scope-resolver.js +53 -0
  41. package/dist/core/ingestion/languages/python/simple-hooks.d.ts +23 -0
  42. package/dist/core/ingestion/languages/python/simple-hooks.js +35 -0
  43. package/dist/core/ingestion/languages/python.js +14 -0
  44. package/dist/core/ingestion/mro-processor.js +38 -22
  45. package/dist/core/ingestion/parsing-processor.d.ts +9 -1
  46. package/dist/core/ingestion/parsing-processor.js +25 -3
  47. package/dist/core/ingestion/pipeline-phases/index.d.ts +1 -0
  48. package/dist/core/ingestion/pipeline-phases/index.js +1 -0
  49. package/dist/core/ingestion/pipeline-phases/parse-impl.d.ts +10 -0
  50. package/dist/core/ingestion/pipeline-phases/parse-impl.js +17 -2
  51. package/dist/core/ingestion/pipeline-phases/parse.d.ts +18 -0
  52. package/dist/core/ingestion/pipeline.js +2 -1
  53. package/dist/core/ingestion/registry-primary-flag.d.ts +32 -5
  54. package/dist/core/ingestion/registry-primary-flag.js +38 -6
  55. package/dist/core/ingestion/resolve-references.d.ts +63 -0
  56. package/dist/core/ingestion/resolve-references.js +175 -0
  57. package/dist/core/ingestion/scope-extractor-bridge.d.ts +1 -1
  58. package/dist/core/ingestion/scope-extractor-bridge.js +2 -2
  59. package/dist/core/ingestion/scope-extractor.d.ts +1 -2
  60. package/dist/core/ingestion/scope-extractor.js +151 -16
  61. package/dist/core/ingestion/scope-resolution/contract/scope-resolver.d.ts +168 -0
  62. package/dist/core/ingestion/scope-resolution/contract/scope-resolver.js +75 -0
  63. package/dist/core/ingestion/scope-resolution/graph-bridge/edges.d.ts +43 -0
  64. package/dist/core/ingestion/scope-resolution/graph-bridge/edges.js +72 -0
  65. package/dist/core/ingestion/scope-resolution/graph-bridge/ids.d.ts +56 -0
  66. package/dist/core/ingestion/scope-resolution/graph-bridge/ids.js +101 -0
  67. package/dist/core/ingestion/scope-resolution/graph-bridge/imports-to-edges.d.ts +17 -0
  68. package/dist/core/ingestion/scope-resolution/graph-bridge/imports-to-edges.js +46 -0
  69. package/dist/core/ingestion/scope-resolution/graph-bridge/method-dispatch.d.ts +19 -0
  70. package/dist/core/ingestion/scope-resolution/graph-bridge/method-dispatch.js +30 -0
  71. package/dist/core/ingestion/scope-resolution/graph-bridge/node-lookup.d.ts +37 -0
  72. package/dist/core/ingestion/scope-resolution/graph-bridge/node-lookup.js +101 -0
  73. package/dist/core/ingestion/scope-resolution/graph-bridge/references-to-edges.d.ts +38 -0
  74. package/dist/core/ingestion/scope-resolution/graph-bridge/references-to-edges.js +73 -0
  75. package/dist/core/ingestion/scope-resolution/passes/compound-receiver.d.ts +32 -0
  76. package/dist/core/ingestion/scope-resolution/passes/compound-receiver.js +137 -0
  77. package/dist/core/ingestion/scope-resolution/passes/free-call-fallback.d.ts +25 -0
  78. package/dist/core/ingestion/scope-resolution/passes/free-call-fallback.js +61 -0
  79. package/dist/core/ingestion/scope-resolution/passes/imported-return-types.d.ts +48 -0
  80. package/dist/core/ingestion/scope-resolution/passes/imported-return-types.js +130 -0
  81. package/dist/core/ingestion/scope-resolution/passes/mro.d.ts +42 -0
  82. package/dist/core/ingestion/scope-resolution/passes/mro.js +99 -0
  83. package/dist/core/ingestion/scope-resolution/passes/receiver-bound-calls.d.ts +42 -0
  84. package/dist/core/ingestion/scope-resolution/passes/receiver-bound-calls.js +200 -0
  85. package/dist/core/ingestion/scope-resolution/pipeline/phase.d.ts +47 -0
  86. package/dist/core/ingestion/scope-resolution/pipeline/phase.js +123 -0
  87. package/dist/core/ingestion/scope-resolution/pipeline/registry.d.ts +17 -0
  88. package/dist/core/ingestion/scope-resolution/pipeline/registry.js +17 -0
  89. package/dist/core/ingestion/scope-resolution/pipeline/run.d.ts +55 -0
  90. package/dist/core/ingestion/scope-resolution/pipeline/run.js +131 -0
  91. package/dist/core/ingestion/scope-resolution/scope/namespace-targets.d.ts +36 -0
  92. package/dist/core/ingestion/scope-resolution/scope/namespace-targets.js +52 -0
  93. package/dist/core/ingestion/scope-resolution/scope/walkers.d.ts +100 -0
  94. package/dist/core/ingestion/scope-resolution/scope/walkers.js +287 -0
  95. package/dist/core/ingestion/scope-resolution/workspace-index.d.ts +46 -0
  96. package/dist/core/ingestion/scope-resolution/workspace-index.js +109 -0
  97. package/dist/core/ingestion/utils/ast-helpers.d.ts +19 -1
  98. package/dist/core/ingestion/utils/ast-helpers.js +70 -0
  99. package/dist/storage/repo-manager.d.ts +133 -0
  100. package/dist/storage/repo-manager.js +249 -5
  101. package/package.json +2 -2
  102. package/scripts/bench-scope-resolution.ts +134 -0
  103. package/scripts/ci-list-migrated-languages.ts +24 -0
package/dist/cli/clean.js CHANGED
@@ -5,7 +5,7 @@
5
5
  * Also unregisters it from the global registry.
6
6
  */
7
7
  import fs from 'fs/promises';
8
- import { findRepo, unregisterRepo, listRegisteredRepos } from '../storage/repo-manager.js';
8
+ import { findRepo, unregisterRepo, listRegisteredRepos, assertSafeStoragePath, UnsafeStoragePathError, } from '../storage/repo-manager.js';
9
9
  export const cleanCommand = async (options) => {
10
10
  // --all flag: clean all indexed repos
11
11
  if (options?.all) {
@@ -24,6 +24,24 @@ export const cleanCommand = async (options) => {
24
24
  }
25
25
  const entries = await listRegisteredRepos();
26
26
  for (const entry of entries) {
27
+ // Safety guard (#1003 review — @magyargergo): same rationale as
28
+ // remove.ts. `~/.gitnexus/registry.json` is user-writable, so a
29
+ // corrupted or hand-edited entry could point storagePath at the
30
+ // repo root, an empty string, or anywhere else — and
31
+ // fs.rm(recursive: true) on any of those would be catastrophic.
32
+ // Skip poisoned entries without touching disk, but keep going
33
+ // through the rest of the registry (preserves the existing
34
+ // per-repo error-tolerance semantics of `clean --all`).
35
+ try {
36
+ assertSafeStoragePath(entry);
37
+ }
38
+ catch (err) {
39
+ if (err instanceof UnsafeStoragePathError) {
40
+ console.error(`Refusing to clean ${entry.name}: ${err.message}`);
41
+ continue;
42
+ }
43
+ throw err;
44
+ }
27
45
  try {
28
46
  await fs.rm(entry.storagePath, { recursive: true, force: true });
29
47
  await unregisterRepo(entry.path);
package/dist/cli/index.js CHANGED
@@ -59,6 +59,12 @@ program
59
59
  .option('-f, --force', 'Skip confirmation prompt')
60
60
  .option('--all', 'Clean all indexed repos')
61
61
  .action(createLazyAction(() => import('./clean.js'), 'cleanCommand'));
62
+ program
63
+ .command('remove <target>')
64
+ .description('Delete the GitNexus index for a registered repo (by alias, name, or absolute path). ' +
65
+ 'Unlike `clean`, does not require being inside the repo. Idempotent on unknown targets.')
66
+ .option('-f, --force', 'Skip confirmation prompt')
67
+ .action(createLazyAction(() => import('./remove.js'), 'removeCommand'));
62
68
  program
63
69
  .command('wiki [path]')
64
70
  .description('Generate repository wiki from knowledge graph')
@@ -0,0 +1,30 @@
1
+ /**
2
+ * Remove Command (#664)
3
+ *
4
+ * Delete the `.gitnexus/` index for a registered repo and unregister it
5
+ * from the global registry (~/.gitnexus/registry.json). The target is
6
+ * identified by alias / basename-derived name / remote-inferred name /
7
+ * absolute path — no `--repo` flag, just a positional argument so the
8
+ * destructive-command ergonomics match `clean` (which is also
9
+ * destructive but scoped to `process.cwd()`).
10
+ *
11
+ * Compared to `clean`:
12
+ * - `clean` acts on the repo discovered by walking up from cwd.
13
+ * - `remove` acts on any registered repo identified by name or path.
14
+ *
15
+ * Behaviour notes:
16
+ * - Idempotent on unknown targets: exits 0 with a warning so that
17
+ * `remove X && analyze Y` keeps working in scripts. Per #664:
18
+ * "behave atomically and idempotently so retries are safe".
19
+ * - Atomic order mirrors `clean`: fs.rm FIRST, then unregister. A
20
+ * partial failure leaves the registry pointing at a missing dir
21
+ * (recoverable by `listRegisteredRepos({ validate: true })` on
22
+ * next read) rather than the opposite, which would orphan
23
+ * .gitnexus/ directories on disk.
24
+ * - `-f` / `--force` matches the confirmation-skip semantics of
25
+ * `clean -f`. (Distinct from `analyze --force`, which re-indexes;
26
+ * here there is no pipeline, so no conflation.)
27
+ */
28
+ export declare const removeCommand: (target: string, options?: {
29
+ force?: boolean;
30
+ }) => Promise<void>;
@@ -0,0 +1,99 @@
1
+ /**
2
+ * Remove Command (#664)
3
+ *
4
+ * Delete the `.gitnexus/` index for a registered repo and unregister it
5
+ * from the global registry (~/.gitnexus/registry.json). The target is
6
+ * identified by alias / basename-derived name / remote-inferred name /
7
+ * absolute path — no `--repo` flag, just a positional argument so the
8
+ * destructive-command ergonomics match `clean` (which is also
9
+ * destructive but scoped to `process.cwd()`).
10
+ *
11
+ * Compared to `clean`:
12
+ * - `clean` acts on the repo discovered by walking up from cwd.
13
+ * - `remove` acts on any registered repo identified by name or path.
14
+ *
15
+ * Behaviour notes:
16
+ * - Idempotent on unknown targets: exits 0 with a warning so that
17
+ * `remove X && analyze Y` keeps working in scripts. Per #664:
18
+ * "behave atomically and idempotently so retries are safe".
19
+ * - Atomic order mirrors `clean`: fs.rm FIRST, then unregister. A
20
+ * partial failure leaves the registry pointing at a missing dir
21
+ * (recoverable by `listRegisteredRepos({ validate: true })` on
22
+ * next read) rather than the opposite, which would orphan
23
+ * .gitnexus/ directories on disk.
24
+ * - `-f` / `--force` matches the confirmation-skip semantics of
25
+ * `clean -f`. (Distinct from `analyze --force`, which re-indexes;
26
+ * here there is no pipeline, so no conflation.)
27
+ */
28
+ import fs from 'fs/promises';
29
+ import { readRegistry, resolveRegistryEntry, assertSafeStoragePath, unregisterRepo, RegistryNotFoundError, RegistryAmbiguousTargetError, UnsafeStoragePathError, } from '../storage/repo-manager.js';
30
+ export const removeCommand = async (target, options) => {
31
+ // Read the registry snapshot once and pass it to the resolver — this
32
+ // lets us render the "before" state in the dry-run path without a
33
+ // second disk read.
34
+ const entries = await readRegistry();
35
+ let entry;
36
+ try {
37
+ entry = resolveRegistryEntry(entries, target);
38
+ }
39
+ catch (err) {
40
+ if (err instanceof RegistryNotFoundError) {
41
+ // Idempotent: missing target is a no-op warning, not an error.
42
+ // The `availableNames` hint comes from the error itself so users
43
+ // can see what they might have meant.
44
+ console.warn(`Nothing to remove: ${err.message}`);
45
+ return;
46
+ }
47
+ if (err instanceof RegistryAmbiguousTargetError) {
48
+ // Duplicate aliases are allowed via --allow-duplicate-name (#829);
49
+ // refuse to guess which one the user meant — surface the full list
50
+ // and exit non-zero so scripts don't silently pick the wrong repo.
51
+ console.error(`Error: ${err.message}`);
52
+ process.exit(1);
53
+ }
54
+ throw err;
55
+ }
56
+ // Confirmation gate — same shape as `clean`. Default is a dry-run
57
+ // that describes what would be deleted; `--force` actually deletes.
58
+ if (!options?.force) {
59
+ console.log(`This will delete the GitNexus index for: ${entry.name}`);
60
+ console.log(` Path: ${entry.path}`);
61
+ console.log(` Storage: ${entry.storagePath}`);
62
+ console.log('\nRun with --force to confirm deletion.');
63
+ return;
64
+ }
65
+ // Safety guard (#1003 review — @magyargergo): refuse to proceed if
66
+ // the registry entry's `storagePath` isn't the canonical
67
+ // `<entry.path>/.gitnexus` subfolder. `~/.gitnexus/registry.json` is
68
+ // user-writable, so a corrupted or hand-edited entry could point
69
+ // storagePath at the repo root, an empty string (→ cwd), a parent
70
+ // dir, or anywhere else; `fs.rm(recursive: true, force: true)` on
71
+ // any of those would be a runtime disaster. Bail before touching
72
+ // disk, with an actionable hint for recovering a broken registry.
73
+ try {
74
+ assertSafeStoragePath(entry);
75
+ }
76
+ catch (err) {
77
+ if (err instanceof UnsafeStoragePathError) {
78
+ console.error(`Error: ${err.message}`);
79
+ process.exit(1);
80
+ }
81
+ throw err;
82
+ }
83
+ // Deletion order: fs.rm first, then unregister. If fs.rm fails mid-way,
84
+ // the registry entry stays so the user can retry. If fs.rm succeeds but
85
+ // unregister throws (e.g. ENOSPC on registry write), the entry becomes
86
+ // orphaned — `listRegisteredRepos({ validate: true })` prunes those on
87
+ // next read, so the failure is self-healing.
88
+ try {
89
+ await fs.rm(entry.storagePath, { recursive: true, force: true });
90
+ await unregisterRepo(entry.path);
91
+ console.log(`Removed: ${entry.name}`);
92
+ console.log(` Path: ${entry.path}`);
93
+ console.log(` Storage: ${entry.storagePath}`);
94
+ }
95
+ catch (err) {
96
+ console.error(`Failed to remove ${entry.name}:`, err);
97
+ process.exit(1);
98
+ }
99
+ };
@@ -1,28 +1,113 @@
1
+ /** Fresh empty iterator per call — `[].values()` returns a new
2
+ * exhausted iterator each invocation, so empty-type lookups don't
3
+ * share a single already-exhausted iterator across callers. */
4
+ function emptyRelIter() {
5
+ return [].values();
6
+ }
1
7
  export const createKnowledgeGraph = () => {
2
8
  const nodeMap = new Map();
3
9
  const relationshipMap = new Map();
10
+ // Per-type index maintained alongside `relationshipMap`. Bucket
11
+ // values are `Map<id, Relationship>` so per-type iteration is cheap
12
+ // and per-edge removal is O(1). See plan
13
+ // docs/plans/2026-04-20-002-perf-parse-heritage-mro-plan.md (Unit 1).
14
+ const relationshipsByType = new Map();
15
+ // Reverse-adjacency index: nodeId → Set<relId> of every edge where
16
+ // this node appears as source OR target. Maintained on writeRel /
17
+ // deleteRel so `removeNode` can delete a node's edges in
18
+ // O(edges-touching-node) instead of O(total-edges).
19
+ const edgeIdsByNode = new Map();
20
+ // File index: filePath → Set<nodeId>. Maintained on addNode /
21
+ // removeNode so `removeNodesByFile` reaches its file's nodes
22
+ // directly instead of scanning the whole node map.
23
+ const nodeIdsByFile = new Map();
24
+ // Private helpers that encode the dual-index invariants in one
25
+ // place. All mutation paths go through these — adding a new
26
+ // mutation method only needs to call the helper, not remember to
27
+ // touch every index.
28
+ const addToBucket = (map, key, value) => {
29
+ let bucket = map.get(key);
30
+ if (bucket === undefined) {
31
+ bucket = new Set();
32
+ map.set(key, bucket);
33
+ }
34
+ bucket.add(value);
35
+ };
36
+ const removeFromBucket = (map, key, value) => {
37
+ const bucket = map.get(key);
38
+ if (bucket === undefined)
39
+ return;
40
+ bucket.delete(value);
41
+ if (bucket.size === 0)
42
+ map.delete(key);
43
+ };
44
+ const writeRel = (rel) => {
45
+ relationshipMap.set(rel.id, rel);
46
+ let typeBucket = relationshipsByType.get(rel.type);
47
+ if (typeBucket === undefined) {
48
+ typeBucket = new Map();
49
+ relationshipsByType.set(rel.type, typeBucket);
50
+ }
51
+ typeBucket.set(rel.id, rel);
52
+ addToBucket(edgeIdsByNode, rel.sourceId, rel.id);
53
+ // Guard against a self-edge writing the same rel.id into the
54
+ // same Set twice — Set dedup handles it, but we skip explicitly
55
+ // for clarity.
56
+ if (rel.targetId !== rel.sourceId) {
57
+ addToBucket(edgeIdsByNode, rel.targetId, rel.id);
58
+ }
59
+ };
60
+ const deleteRel = (rel) => {
61
+ relationshipMap.delete(rel.id);
62
+ const typeBucket = relationshipsByType.get(rel.type);
63
+ if (typeBucket !== undefined) {
64
+ typeBucket.delete(rel.id);
65
+ if (typeBucket.size === 0)
66
+ relationshipsByType.delete(rel.type);
67
+ }
68
+ removeFromBucket(edgeIdsByNode, rel.sourceId, rel.id);
69
+ if (rel.targetId !== rel.sourceId) {
70
+ removeFromBucket(edgeIdsByNode, rel.targetId, rel.id);
71
+ }
72
+ };
4
73
  const addNode = (node) => {
5
- if (!nodeMap.has(node.id)) {
6
- nodeMap.set(node.id, node);
74
+ if (nodeMap.has(node.id))
75
+ return;
76
+ nodeMap.set(node.id, node);
77
+ const filePath = node.properties?.filePath;
78
+ if (typeof filePath === 'string' && filePath.length > 0) {
79
+ addToBucket(nodeIdsByFile, filePath, node.id);
7
80
  }
8
81
  };
9
82
  const addRelationship = (relationship) => {
10
- if (!relationshipMap.has(relationship.id)) {
11
- relationshipMap.set(relationship.id, relationship);
12
- }
83
+ if (relationshipMap.has(relationship.id))
84
+ return;
85
+ writeRel(relationship);
13
86
  };
14
87
  /**
15
- * Remove a single node and all relationships involving it
88
+ * Remove a single node and all relationships involving it.
89
+ * O(edges-touching-node) via the reverse-adjacency index — no full
90
+ * relationshipMap scan.
16
91
  */
17
92
  const removeNode = (nodeId) => {
18
- if (!nodeMap.has(nodeId))
93
+ const node = nodeMap.get(nodeId);
94
+ if (node === undefined)
19
95
  return false;
20
96
  nodeMap.delete(nodeId);
21
- // Remove all relationships involving this node
22
- for (const [relId, rel] of relationshipMap) {
23
- if (rel.sourceId === nodeId || rel.targetId === nodeId) {
24
- relationshipMap.delete(relId);
97
+ const filePath = node.properties?.filePath;
98
+ if (typeof filePath === 'string' && filePath.length > 0) {
99
+ removeFromBucket(nodeIdsByFile, filePath, nodeId);
100
+ }
101
+ const touchingEdgeIds = edgeIdsByNode.get(nodeId);
102
+ if (touchingEdgeIds !== undefined) {
103
+ // Snapshot the ids before iterating — deleteRel mutates the same
104
+ // Set via removeFromBucket, which would break mid-loop iteration.
105
+ for (const relId of [...touchingEdgeIds]) {
106
+ const rel = relationshipMap.get(relId);
107
+ if (rel !== undefined)
108
+ deleteRel(rel);
25
109
  }
110
+ edgeIdsByNode.delete(nodeId);
26
111
  }
27
112
  return true;
28
113
  };
@@ -31,20 +116,26 @@ export const createKnowledgeGraph = () => {
31
116
  * Returns true if the relationship existed and was removed, false otherwise.
32
117
  */
33
118
  const removeRelationship = (relationshipId) => {
34
- return relationshipMap.delete(relationshipId);
119
+ const rel = relationshipMap.get(relationshipId);
120
+ if (rel === undefined)
121
+ return false;
122
+ deleteRel(rel);
123
+ return true;
35
124
  };
36
125
  /**
37
126
  * Remove all nodes (and their relationships) belonging to a file.
127
+ * O(file-nodes × avg-edges-per-node) via the file index — no full
128
+ * node-map scan.
38
129
  */
39
130
  const removeNodesByFile = (filePath) => {
40
- let removed = 0;
41
- for (const [nodeId, node] of nodeMap) {
42
- if (node.properties?.filePath === filePath) {
43
- removeNode(nodeId);
44
- removed++;
45
- }
46
- }
47
- return removed;
131
+ const nodeIds = nodeIdsByFile.get(filePath);
132
+ if (nodeIds === undefined)
133
+ return 0;
134
+ // Snapshot before iterating — removeNode mutates nodeIdsByFile.
135
+ const snapshot = [...nodeIds];
136
+ for (const nodeId of snapshot)
137
+ removeNode(nodeId);
138
+ return snapshot.length;
48
139
  };
49
140
  return {
50
141
  get nodes() {
@@ -55,6 +146,10 @@ export const createKnowledgeGraph = () => {
55
146
  },
56
147
  iterNodes: () => nodeMap.values(),
57
148
  iterRelationships: () => relationshipMap.values(),
149
+ iterRelationshipsByType: (type) => {
150
+ const bucket = relationshipsByType.get(type);
151
+ return bucket === undefined ? emptyRelIter() : bucket.values();
152
+ },
58
153
  forEachNode(fn) {
59
154
  nodeMap.forEach(fn);
60
155
  },
@@ -6,12 +6,23 @@
6
6
  *
7
7
  * This file only defines the CLI's KnowledgeGraph with mutation methods.
8
8
  */
9
- import type { GraphNode, GraphRelationship } from '../../_shared/index.js';
9
+ import type { GraphNode, GraphRelationship, RelationshipType } from '../../_shared/index.js';
10
10
  export interface KnowledgeGraph {
11
11
  nodes: GraphNode[];
12
12
  relationships: GraphRelationship[];
13
13
  iterNodes: () => IterableIterator<GraphNode>;
14
14
  iterRelationships: () => IterableIterator<GraphRelationship>;
15
+ /**
16
+ * Iterate ONLY relationships of the given type, backed by a per-type
17
+ * index maintained in `addRelationship` / `removeRelationship` /
18
+ * `removeNode` / `removeNodesByFile`. Returns an empty iterator when
19
+ * the graph contains no relationships of that type.
20
+ *
21
+ * Prefer this over `iterRelationships()` + per-edge type filtering
22
+ * for hot paths (MRO setup, heritage walks). Backwards-compatible:
23
+ * existing `iterRelationships()` callers keep working.
24
+ */
25
+ iterRelationshipsByType: (type: RelationshipType) => IterableIterator<GraphRelationship>;
15
26
  forEachNode: (fn: (node: GraphNode) => void) => void;
16
27
  forEachRelationship: (fn: (rel: GraphRelationship) => void) => void;
17
28
  getNode: (id: string) => GraphNode | undefined;
@@ -1,3 +1,7 @@
1
1
  import type { GroupConfig } from './types.js';
2
2
  export declare function parseGroupConfig(yamlContent: string): GroupConfig;
3
+ export declare class GroupNotFoundError extends Error {
4
+ readonly groupName: string;
5
+ constructor(groupName: string);
6
+ }
3
7
  export declare function loadGroupConfig(groupDir: string): Promise<GroupConfig>;
@@ -74,10 +74,27 @@ export function parseGroupConfig(yamlContent) {
74
74
  matching,
75
75
  };
76
76
  }
77
+ export class GroupNotFoundError extends Error {
78
+ groupName;
79
+ constructor(groupName) {
80
+ super(`Group "${groupName}" not found`);
81
+ this.groupName = groupName;
82
+ this.name = 'GroupNotFoundError';
83
+ }
84
+ }
77
85
  export async function loadGroupConfig(groupDir) {
78
86
  const fsp = await import('node:fs/promises');
79
87
  const path = await import('node:path');
80
88
  const yamlPath = path.join(groupDir, 'group.yaml');
81
- const content = await fsp.readFile(yamlPath, 'utf-8');
89
+ let content;
90
+ try {
91
+ content = await fsp.readFile(yamlPath, 'utf-8');
92
+ }
93
+ catch (err) {
94
+ if (err.code === 'ENOENT') {
95
+ throw new GroupNotFoundError(path.basename(groupDir));
96
+ }
97
+ throw err;
98
+ }
82
99
  return parseGroupConfig(content);
83
100
  }
@@ -4,7 +4,7 @@
4
4
  */
5
5
  import fsp from 'node:fs/promises';
6
6
  import path from 'node:path';
7
- import { loadGroupConfig } from './config-parser.js';
7
+ import { GroupNotFoundError, loadGroupConfig } from './config-parser.js';
8
8
  import { fileMatchesServicePrefix, normalizeServicePrefix, repoInSubgroup, } from './group-path-utils.js';
9
9
  import { getGroupDir } from './storage.js';
10
10
  import { closeBridgeDb, openBridgeDbReadOnly, queryBridge, readBridgeMeta } from './bridge-db.js';
@@ -242,6 +242,8 @@ export async function runGroupImpact(deps, params) {
242
242
  config = await loadGroupConfig(groupDir);
243
243
  }
244
244
  catch (e) {
245
+ if (e instanceof GroupNotFoundError)
246
+ return { error: `Group "${name}" not found. Run group_list to see configured groups.` };
245
247
  return { error: e instanceof Error ? e.message : String(e) };
246
248
  }
247
249
  const resolved = await resolveGroupRepo(deps.port, config, repoPath);
@@ -255,13 +257,10 @@ export async function runGroupImpact(deps, params) {
255
257
  includeTests,
256
258
  minConfidence,
257
259
  };
258
- // Single shared deadline for Phase 1 (local walk) + Phase 2 (bridge fan-out).
259
- // Phase 1 still gets the full budget; Phase 2 only uses whatever wall-clock
260
- // time is left, so total work cannot exceed `timeoutMs`.
261
260
  const deadline = Date.now() + Math.max(0, timeoutMs);
262
261
  const { value: local, timedOut: localTimedOut } = await safeLocalImpact(deps.port, resolved, impactParams, timeoutMs);
263
262
  if (localTimedOut) {
264
- const base = local;
263
+ const _base = local;
265
264
  return {
266
265
  local,
267
266
  group: name,
@@ -283,24 +282,13 @@ export async function runGroupImpact(deps, params) {
283
282
  }
284
283
  const localObj = local;
285
284
  if (localObj?.error && typeof localObj.error === 'string') {
286
- const empty = {
287
- local,
288
- group: name,
289
- cross: [],
290
- outOfScope: [],
291
- truncated: false,
292
- truncatedRepos: [],
293
- summary: {
294
- direct: 0,
295
- processes_affected: 0,
296
- modules_affected: 0,
297
- cross_repo_hits: 0,
298
- },
299
- risk: 'UNKNOWN',
300
- timeoutMs,
301
- crossDepthWarning,
302
- };
303
- return empty;
285
+ // Fail closed: the local-impact phase errored (missing symbol, graph-load
286
+ // failure, thrown exception wrapped by safeLocalImpact, or port-returned
287
+ // `{ error }`). Do NOT wrap it into a zero-hit success payload — callers
288
+ // branch on top-level `error`, and a blast-radius tool reporting "no
289
+ // impact" on the failure path is a false negative on a safety-critical
290
+ // signal. Bubble the error so consumers treat it as a failure.
291
+ return { error: `Local impact failed for ${repoPath}: ${localObj.error}` };
304
292
  }
305
293
  if (servicePrefix) {
306
294
  const tf = localObj?.target?.filePath;
@@ -372,7 +360,6 @@ export async function runGroupImpact(deps, params) {
372
360
  continue;
373
361
  }
374
362
  if (!repoInSubgroup(n.neighborRepo, subgroup)) {
375
- // CrossLink convention: consumer -> provider
376
363
  outOfScope.push({
377
364
  from: direction === 'upstream' ? n.neighborRepo : repoPath,
378
365
  to: direction === 'upstream' ? repoPath : n.neighborRepo,
@@ -5,7 +5,7 @@
5
5
  import fsp from 'node:fs/promises';
6
6
  import path from 'node:path';
7
7
  import { checkStaleness } from '../git-staleness.js';
8
- import { loadGroupConfig } from './config-parser.js';
8
+ import { GroupNotFoundError, loadGroupConfig } from './config-parser.js';
9
9
  import { fileMatchesServicePrefix, normalizeServicePrefix, repoInSubgroup, } from './group-path-utils.js';
10
10
  import { getDefaultGitnexusDir, getGroupDir, listGroups, readContractRegistry } from './storage.js';
11
11
  import { syncGroup } from './sync.js';
@@ -134,7 +134,15 @@ export class GroupService {
134
134
  return { groups };
135
135
  }
136
136
  const groupDir = getGroupDir(getDefaultGitnexusDir(), name);
137
- const config = await loadGroupConfig(groupDir);
137
+ let config;
138
+ try {
139
+ config = await loadGroupConfig(groupDir);
140
+ }
141
+ catch (err) {
142
+ if (err instanceof GroupNotFoundError)
143
+ return { error: `Group "${name}" not found. Run group_list to see configured groups.` };
144
+ throw err;
145
+ }
138
146
  return {
139
147
  name: config.name,
140
148
  description: config.description,
@@ -147,7 +155,15 @@ export class GroupService {
147
155
  if (!name)
148
156
  return { error: 'name is required' };
149
157
  const groupDir = getGroupDir(getDefaultGitnexusDir(), name);
150
- const config = await loadGroupConfig(groupDir);
158
+ let config;
159
+ try {
160
+ config = await loadGroupConfig(groupDir);
161
+ }
162
+ catch (err) {
163
+ if (err instanceof GroupNotFoundError)
164
+ return { error: `Group "${name}" not found. Run group_list to see configured groups.` };
165
+ throw err;
166
+ }
151
167
  const result = await syncGroup(config, {
152
168
  groupDir,
153
169
  exactOnly: Boolean(params.exactOnly),
@@ -222,6 +238,14 @@ export class GroupService {
222
238
  config = await loadGroupConfig(groupDir);
223
239
  }
224
240
  catch (e) {
241
+ if (e instanceof GroupNotFoundError)
242
+ return {
243
+ group: name,
244
+ target: target || uid,
245
+ service: servicePrefix,
246
+ error: `Group "${name}" not found. Run group_list to see configured groups.`,
247
+ results: [],
248
+ };
225
249
  return {
226
250
  group: name,
227
251
  target: target || uid,
@@ -231,9 +255,6 @@ export class GroupService {
231
255
  };
232
256
  }
233
257
  const memberEntries = Object.entries(config.repos).filter(([repoPath]) => repoInSubgroup(repoPath, subgroup, subgroupExact));
234
- // Per-repo work is independent (each repo opens its own DB handle and the
235
- // group-level result preserves repo iteration order via the indexed map).
236
- // Errors are caught per repo so one slow/failed member does not block the rest.
237
258
  const results = await Promise.all(memberEntries.map(async ([repoPath, registryName]) => {
238
259
  try {
239
260
  const repoObj = await this.port.resolveRepo(registryName);
@@ -282,10 +303,16 @@ export class GroupService {
282
303
  const subgroup = typeof params.subgroup === 'string' ? params.subgroup : undefined;
283
304
  const subgroupExact = params.subgroupExact === true;
284
305
  const groupDir = getGroupDir(getDefaultGitnexusDir(), name);
285
- const config = await loadGroupConfig(groupDir);
306
+ let config;
307
+ try {
308
+ config = await loadGroupConfig(groupDir);
309
+ }
310
+ catch (err) {
311
+ if (err instanceof GroupNotFoundError)
312
+ return { error: `Group "${name}" not found. Run group_list to see configured groups.` };
313
+ throw err;
314
+ }
286
315
  const memberEntries = Object.entries(config.repos).filter(([repoPath]) => repoInSubgroup(repoPath, subgroup, subgroupExact));
287
- // Per-repo query is independent; run them concurrently and isolate
288
- // failures so one slow/failed member does not block the rest.
289
316
  const perRepo = await Promise.all(memberEntries.map(async ([repoPath, registryName]) => {
290
317
  try {
291
318
  const repoObj = await this.port.resolveRepo(registryName);
@@ -324,7 +351,15 @@ export class GroupService {
324
351
  if (!name)
325
352
  return { error: 'name is required' };
326
353
  const groupDir = getGroupDir(getDefaultGitnexusDir(), name);
327
- const config = await loadGroupConfig(groupDir);
354
+ let config;
355
+ try {
356
+ config = await loadGroupConfig(groupDir);
357
+ }
358
+ catch (err) {
359
+ if (err instanceof GroupNotFoundError)
360
+ return { error: `Group "${name}" not found. Run group_list to see configured groups.` };
361
+ throw err;
362
+ }
328
363
  const registry = await readContractRegistry(groupDir);
329
364
  const repoStatuses = {};
330
365
  for (const [repoPath, registryName] of Object.entries(config.repos)) {
@@ -1,5 +1,20 @@
1
1
  import Parser from 'tree-sitter';
2
- export interface ASTCache {
2
+ /**
3
+ * Minimal structural shape consumers need when reading Trees back
4
+ * through a phase-dependency boundary. Declared here so phases that
5
+ * receive ASTCache via `getPhaseOutput<...>` don't hand-roll their
6
+ * own inline structural types that silently drift when ASTCache's
7
+ * contract changes.
8
+ *
9
+ * Typed as `unknown` at the Tree boundary because consumers on the
10
+ * other side of the phase-output map don't share tree-sitter's type
11
+ * graph (e.g. COBOL's standalone processor).
12
+ */
13
+ export interface ASTCacheReader {
14
+ get(filePath: string): unknown;
15
+ clear(): void;
16
+ }
17
+ export interface ASTCache extends ASTCacheReader {
3
18
  get: (filePath: string) => Parser.Tree | undefined;
4
19
  set: (filePath: string, tree: Parser.Tree) => void;
5
20
  clear: () => void;
@@ -7,8 +7,20 @@ export const createASTCache = (maxSize = 50) => {
7
7
  max: effectiveMax,
8
8
  dispose: (tree) => {
9
9
  try {
10
- // NOTE: web-tree-sitter has tree.delete(); native tree-sitter trees are GC-managed.
11
- // Keep this try/catch so we don't crash on either runtime.
10
+ // NOTE: web-tree-sitter has tree.delete(); native tree-sitter
11
+ // trees are GC-managed and .delete is absent (no-op here).
12
+ //
13
+ // Single-owner invariant (load-bearing under WASM): a given
14
+ // Parser.Tree reference must live in AT MOST ONE ASTCache
15
+ // that disposes. The parse-phase chunk-local cache clears
16
+ // between chunks; the cross-phase `scopeTreeCache` (also an
17
+ // ASTCache today) holds the same Tree by reference. Under
18
+ // native tree-sitter this is benign (dispose is a no-op).
19
+ // If/when GitNexus adopts web-tree-sitter for sequential
20
+ // parsing, the cross-phase cache must either (a) skip
21
+ // writing Trees that are already owned by a disposing cache,
22
+ // or (b) use tree.copy() per entry. Failing to pick one
23
+ // will hand freed memory to scope-resolution.
12
24
  tree.delete?.();
13
25
  }
14
26
  catch (e) {