gitnexus 1.3.8 → 1.3.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +194 -194
- package/dist/cli/ai-context.js +87 -87
- package/dist/cli/index.js +15 -25
- package/dist/cli/lazy-action.d.ts +6 -0
- package/dist/cli/lazy-action.js +18 -0
- package/dist/core/augmentation/engine.js +20 -20
- package/dist/core/embeddings/embedding-pipeline.js +26 -26
- package/dist/core/ingestion/ast-cache.js +3 -2
- package/dist/core/ingestion/cluster-enricher.js +16 -16
- package/dist/core/ingestion/pipeline.js +8 -0
- package/dist/core/ingestion/tree-sitter-queries.js +484 -484
- package/dist/core/ingestion/workers/parse-worker.js +4 -2
- package/dist/core/kuzu/kuzu-adapter.js +9 -9
- package/dist/core/kuzu/schema.js +287 -287
- package/dist/core/search/bm25-index.js +5 -5
- package/dist/core/search/hybrid-search.js +3 -3
- package/dist/core/wiki/graph-queries.js +52 -52
- package/dist/core/wiki/html-viewer.js +192 -192
- package/dist/core/wiki/prompts.js +82 -82
- package/dist/mcp/compatible-stdio-transport.d.ts +25 -0
- package/dist/mcp/compatible-stdio-transport.js +200 -0
- package/dist/mcp/local/local-backend.js +128 -128
- package/dist/mcp/resources.js +42 -42
- package/dist/mcp/server.js +18 -18
- package/dist/mcp/tools.js +86 -86
- package/hooks/claude/gitnexus-hook.cjs +155 -155
- package/hooks/claude/pre-tool-use.sh +79 -79
- package/hooks/claude/session-start.sh +42 -42
- package/package.json +96 -96
- package/scripts/patch-tree-sitter-swift.cjs +74 -74
- package/skills/gitnexus-cli.md +82 -82
- package/skills/gitnexus-debugging.md +89 -89
- package/skills/gitnexus-exploring.md +78 -78
- package/skills/gitnexus-guide.md +64 -64
- package/skills/gitnexus-impact-analysis.md +97 -97
- package/skills/gitnexus-pr-review.md +163 -163
- package/skills/gitnexus-refactoring.md +121 -121
- package/vendor/leiden/index.cjs +355 -355
- package/vendor/leiden/utils.cjs +392 -392
package/dist/cli/index.js
CHANGED
|
@@ -2,18 +2,8 @@
|
|
|
2
2
|
// Heap re-spawn removed — only analyze.ts needs the 8GB heap (via its own ensureHeap()).
|
|
3
3
|
// Removing it from here improves MCP server startup time significantly.
|
|
4
4
|
import { Command } from 'commander';
|
|
5
|
-
import { analyzeCommand } from './analyze.js';
|
|
6
|
-
import { serveCommand } from './serve.js';
|
|
7
|
-
import { listCommand } from './list.js';
|
|
8
|
-
import { statusCommand } from './status.js';
|
|
9
|
-
import { mcpCommand } from './mcp.js';
|
|
10
|
-
import { cleanCommand } from './clean.js';
|
|
11
|
-
import { setupCommand } from './setup.js';
|
|
12
|
-
import { augmentCommand } from './augment.js';
|
|
13
|
-
import { wikiCommand } from './wiki.js';
|
|
14
|
-
import { queryCommand, contextCommand, impactCommand, cypherCommand } from './tool.js';
|
|
15
|
-
import { evalServerCommand } from './eval-server.js';
|
|
16
5
|
import { createRequire } from 'node:module';
|
|
6
|
+
import { createLazyAction } from './lazy-action.js';
|
|
17
7
|
const _require = createRequire(import.meta.url);
|
|
18
8
|
const pkg = _require('../../package.json');
|
|
19
9
|
const program = new Command();
|
|
@@ -24,37 +14,37 @@ program
|
|
|
24
14
|
program
|
|
25
15
|
.command('setup')
|
|
26
16
|
.description('One-time setup: configure MCP for Cursor, Claude Code, OpenCode')
|
|
27
|
-
.action(setupCommand);
|
|
17
|
+
.action(createLazyAction(() => import('./setup.js'), 'setupCommand'));
|
|
28
18
|
program
|
|
29
19
|
.command('analyze [path]')
|
|
30
20
|
.description('Index a repository (full analysis)')
|
|
31
21
|
.option('-f, --force', 'Force full re-index even if up to date')
|
|
32
22
|
.option('--embeddings', 'Enable embedding generation for semantic search (off by default)')
|
|
33
|
-
.action(analyzeCommand);
|
|
23
|
+
.action(createLazyAction(() => import('./analyze.js'), 'analyzeCommand'));
|
|
34
24
|
program
|
|
35
25
|
.command('serve')
|
|
36
26
|
.description('Start local HTTP server for web UI connection')
|
|
37
27
|
.option('-p, --port <port>', 'Port number', '4747')
|
|
38
28
|
.option('--host <host>', 'Bind address (default: 127.0.0.1, use 0.0.0.0 for remote access)')
|
|
39
|
-
.action(serveCommand);
|
|
29
|
+
.action(createLazyAction(() => import('./serve.js'), 'serveCommand'));
|
|
40
30
|
program
|
|
41
31
|
.command('mcp')
|
|
42
32
|
.description('Start MCP server (stdio) — serves all indexed repos')
|
|
43
|
-
.action(mcpCommand);
|
|
33
|
+
.action(createLazyAction(() => import('./mcp.js'), 'mcpCommand'));
|
|
44
34
|
program
|
|
45
35
|
.command('list')
|
|
46
36
|
.description('List all indexed repositories')
|
|
47
|
-
.action(listCommand);
|
|
37
|
+
.action(createLazyAction(() => import('./list.js'), 'listCommand'));
|
|
48
38
|
program
|
|
49
39
|
.command('status')
|
|
50
40
|
.description('Show index status for current repo')
|
|
51
|
-
.action(statusCommand);
|
|
41
|
+
.action(createLazyAction(() => import('./status.js'), 'statusCommand'));
|
|
52
42
|
program
|
|
53
43
|
.command('clean')
|
|
54
44
|
.description('Delete GitNexus index for current repo')
|
|
55
45
|
.option('-f, --force', 'Skip confirmation prompt')
|
|
56
46
|
.option('--all', 'Clean all indexed repos')
|
|
57
|
-
.action(cleanCommand);
|
|
47
|
+
.action(createLazyAction(() => import('./clean.js'), 'cleanCommand'));
|
|
58
48
|
program
|
|
59
49
|
.command('wiki [path]')
|
|
60
50
|
.description('Generate repository wiki from knowledge graph')
|
|
@@ -64,11 +54,11 @@ program
|
|
|
64
54
|
.option('--api-key <key>', 'LLM API key (saved to ~/.gitnexus/config.json)')
|
|
65
55
|
.option('--concurrency <n>', 'Parallel LLM calls (default: 3)', '3')
|
|
66
56
|
.option('--gist', 'Publish wiki as a public GitHub Gist after generation')
|
|
67
|
-
.action(wikiCommand);
|
|
57
|
+
.action(createLazyAction(() => import('./wiki.js'), 'wikiCommand'));
|
|
68
58
|
program
|
|
69
59
|
.command('augment <pattern>')
|
|
70
60
|
.description('Augment a search pattern with knowledge graph context (used by hooks)')
|
|
71
|
-
.action(augmentCommand);
|
|
61
|
+
.action(createLazyAction(() => import('./augment.js'), 'augmentCommand'));
|
|
72
62
|
// ─── Direct Tool Commands (no MCP overhead) ────────────────────────
|
|
73
63
|
// These invoke LocalBackend directly for use in eval, scripts, and CI.
|
|
74
64
|
program
|
|
@@ -79,7 +69,7 @@ program
|
|
|
79
69
|
.option('-g, --goal <text>', 'What you want to find')
|
|
80
70
|
.option('-l, --limit <n>', 'Max processes to return (default: 5)')
|
|
81
71
|
.option('--content', 'Include full symbol source code')
|
|
82
|
-
.action(queryCommand);
|
|
72
|
+
.action(createLazyAction(() => import('./tool.js'), 'queryCommand'));
|
|
83
73
|
program
|
|
84
74
|
.command('context [name]')
|
|
85
75
|
.description('360-degree view of a code symbol: callers, callees, processes')
|
|
@@ -87,7 +77,7 @@ program
|
|
|
87
77
|
.option('-u, --uid <uid>', 'Direct symbol UID (zero-ambiguity lookup)')
|
|
88
78
|
.option('-f, --file <path>', 'File path to disambiguate common names')
|
|
89
79
|
.option('--content', 'Include full symbol source code')
|
|
90
|
-
.action(contextCommand);
|
|
80
|
+
.action(createLazyAction(() => import('./tool.js'), 'contextCommand'));
|
|
91
81
|
program
|
|
92
82
|
.command('impact <target>')
|
|
93
83
|
.description('Blast radius analysis: what breaks if you change a symbol')
|
|
@@ -95,17 +85,17 @@ program
|
|
|
95
85
|
.option('-r, --repo <name>', 'Target repository')
|
|
96
86
|
.option('--depth <n>', 'Max relationship depth (default: 3)')
|
|
97
87
|
.option('--include-tests', 'Include test files in results')
|
|
98
|
-
.action(impactCommand);
|
|
88
|
+
.action(createLazyAction(() => import('./tool.js'), 'impactCommand'));
|
|
99
89
|
program
|
|
100
90
|
.command('cypher <query>')
|
|
101
91
|
.description('Execute raw Cypher query against the knowledge graph')
|
|
102
92
|
.option('-r, --repo <name>', 'Target repository')
|
|
103
|
-
.action(cypherCommand);
|
|
93
|
+
.action(createLazyAction(() => import('./tool.js'), 'cypherCommand'));
|
|
104
94
|
// ─── Eval Server (persistent daemon for SWE-bench) ─────────────────
|
|
105
95
|
program
|
|
106
96
|
.command('eval-server')
|
|
107
97
|
.description('Start lightweight HTTP server for fast tool calls during evaluation')
|
|
108
98
|
.option('-p, --port <port>', 'Port number', '4848')
|
|
109
99
|
.option('--idle-timeout <seconds>', 'Auto-shutdown after N seconds idle (0 = disabled)', '0')
|
|
110
|
-
.action(evalServerCommand);
|
|
100
|
+
.action(createLazyAction(() => import('./eval-server.js'), 'evalServerCommand'));
|
|
111
101
|
program.parse(process.argv);
|
|
@@ -0,0 +1,6 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Creates a lazy-loaded CLI action that defers module import until invocation.
|
|
3
|
+
* The generic constraints ensure the export name is a valid key of the module
|
|
4
|
+
* at compile time — catching typos when used with concrete module imports.
|
|
5
|
+
*/
|
|
6
|
+
export declare function createLazyAction<TModule extends Record<string, unknown>, TKey extends string & keyof TModule>(loader: () => Promise<TModule>, exportName: TKey): (...args: unknown[]) => Promise<void>;
|
|
@@ -0,0 +1,18 @@
|
|
|
/**
 * Factory for lazy-loaded CLI actions.
 *
 * Instead of importing every command module at startup, the returned handler
 * performs the dynamic import only when the command is invoked, then forwards
 * Commander's arguments to the named export. This keeps `gitnexus --help` and
 * MCP server startup fast.
 *
 * @param {() => Promise<Record<string, unknown>>} loader - thunk performing the dynamic `import()`
 * @param {string} exportName - name of the command handler exported by the loaded module
 * @returns {(...args: unknown[]) => Promise<void>} async action for Commander's `.action(...)`
 * @throws {Error} when the loaded module has no callable export under `exportName`
 */
const isCallable = (candidate) => typeof candidate === 'function';

export function createLazyAction(loader, exportName) {
    return async function lazyAction(...forwardedArgs) {
        const loaded = await loader();
        const handler = loaded[exportName];
        // Guard against a missing or non-function export before invoking it,
        // so a typo surfaces as a clear error rather than "x is not a function".
        if (!isCallable(handler)) {
            throw new Error(`Lazy action export not found: ${exportName}`);
        }
        await handler(...forwardedArgs);
    };
}
|
|
@@ -98,11 +98,11 @@ export async function augment(pattern, cwd) {
|
|
|
98
98
|
for (const result of bm25Results.slice(0, 5)) {
|
|
99
99
|
const escaped = result.filePath.replace(/'/g, "''");
|
|
100
100
|
try {
|
|
101
|
-
const symbols = await executeQuery(repoId, `
|
|
102
|
-
MATCH (n) WHERE n.filePath = '${escaped}'
|
|
103
|
-
AND n.name CONTAINS '${pattern.replace(/'/g, "''").split(/\s+/)[0]}'
|
|
104
|
-
RETURN n.id AS id, n.name AS name, labels(n)[0] AS type, n.filePath AS filePath
|
|
105
|
-
LIMIT 3
|
|
101
|
+
const symbols = await executeQuery(repoId, `
|
|
102
|
+
MATCH (n) WHERE n.filePath = '${escaped}'
|
|
103
|
+
AND n.name CONTAINS '${pattern.replace(/'/g, "''").split(/\s+/)[0]}'
|
|
104
|
+
RETURN n.id AS id, n.name AS name, labels(n)[0] AS type, n.filePath AS filePath
|
|
105
|
+
LIMIT 3
|
|
106
106
|
`);
|
|
107
107
|
for (const sym of symbols) {
|
|
108
108
|
symbolMatches.push({
|
|
@@ -130,10 +130,10 @@ export async function augment(pattern, cwd) {
|
|
|
130
130
|
// Callers
|
|
131
131
|
let callers = [];
|
|
132
132
|
try {
|
|
133
|
-
const rows = await executeQuery(repoId, `
|
|
134
|
-
MATCH (caller)-[:CodeRelation {type: 'CALLS'}]->(n {id: '${escaped}'})
|
|
135
|
-
RETURN caller.name AS name
|
|
136
|
-
LIMIT 3
|
|
133
|
+
const rows = await executeQuery(repoId, `
|
|
134
|
+
MATCH (caller)-[:CodeRelation {type: 'CALLS'}]->(n {id: '${escaped}'})
|
|
135
|
+
RETURN caller.name AS name
|
|
136
|
+
LIMIT 3
|
|
137
137
|
`);
|
|
138
138
|
callers = rows.map((r) => r.name || r[0]).filter(Boolean);
|
|
139
139
|
}
|
|
@@ -141,10 +141,10 @@ export async function augment(pattern, cwd) {
|
|
|
141
141
|
// Callees
|
|
142
142
|
let callees = [];
|
|
143
143
|
try {
|
|
144
|
-
const rows = await executeQuery(repoId, `
|
|
145
|
-
MATCH (n {id: '${escaped}'})-[:CodeRelation {type: 'CALLS'}]->(callee)
|
|
146
|
-
RETURN callee.name AS name
|
|
147
|
-
LIMIT 3
|
|
144
|
+
const rows = await executeQuery(repoId, `
|
|
145
|
+
MATCH (n {id: '${escaped}'})-[:CodeRelation {type: 'CALLS'}]->(callee)
|
|
146
|
+
RETURN callee.name AS name
|
|
147
|
+
LIMIT 3
|
|
148
148
|
`);
|
|
149
149
|
callees = rows.map((r) => r.name || r[0]).filter(Boolean);
|
|
150
150
|
}
|
|
@@ -152,9 +152,9 @@ export async function augment(pattern, cwd) {
|
|
|
152
152
|
// Processes
|
|
153
153
|
let processes = [];
|
|
154
154
|
try {
|
|
155
|
-
const rows = await executeQuery(repoId, `
|
|
156
|
-
MATCH (n {id: '${escaped}'})-[r:CodeRelation {type: 'STEP_IN_PROCESS'}]->(p:Process)
|
|
157
|
-
RETURN p.heuristicLabel AS label, r.step AS step, p.stepCount AS stepCount
|
|
155
|
+
const rows = await executeQuery(repoId, `
|
|
156
|
+
MATCH (n {id: '${escaped}'})-[r:CodeRelation {type: 'STEP_IN_PROCESS'}]->(p:Process)
|
|
157
|
+
RETURN p.heuristicLabel AS label, r.step AS step, p.stepCount AS stepCount
|
|
158
158
|
`);
|
|
159
159
|
processes = rows.map((r) => {
|
|
160
160
|
const label = r.label || r[0];
|
|
@@ -167,10 +167,10 @@ export async function augment(pattern, cwd) {
|
|
|
167
167
|
// Cluster cohesion (internal ranking signal)
|
|
168
168
|
let cohesion = 0;
|
|
169
169
|
try {
|
|
170
|
-
const rows = await executeQuery(repoId, `
|
|
171
|
-
MATCH (n {id: '${escaped}'})-[:CodeRelation {type: 'MEMBER_OF'}]->(c:Community)
|
|
172
|
-
RETURN c.cohesion AS cohesion
|
|
173
|
-
LIMIT 1
|
|
170
|
+
const rows = await executeQuery(repoId, `
|
|
171
|
+
MATCH (n {id: '${escaped}'})-[:CodeRelation {type: 'MEMBER_OF'}]->(c:Community)
|
|
172
|
+
RETURN c.cohesion AS cohesion
|
|
173
|
+
LIMIT 1
|
|
174
174
|
`);
|
|
175
175
|
if (rows.length > 0) {
|
|
176
176
|
cohesion = (rows[0].cohesion ?? rows[0][0]) || 0;
|
|
@@ -24,19 +24,19 @@ const queryEmbeddableNodes = async (executeQuery) => {
|
|
|
24
24
|
let query;
|
|
25
25
|
if (label === 'File') {
|
|
26
26
|
// File nodes don't have startLine/endLine
|
|
27
|
-
query = `
|
|
28
|
-
MATCH (n:File)
|
|
29
|
-
RETURN n.id AS id, n.name AS name, 'File' AS label,
|
|
30
|
-
n.filePath AS filePath, n.content AS content
|
|
27
|
+
query = `
|
|
28
|
+
MATCH (n:File)
|
|
29
|
+
RETURN n.id AS id, n.name AS name, 'File' AS label,
|
|
30
|
+
n.filePath AS filePath, n.content AS content
|
|
31
31
|
`;
|
|
32
32
|
}
|
|
33
33
|
else {
|
|
34
34
|
// Code elements have startLine/endLine
|
|
35
|
-
query = `
|
|
36
|
-
MATCH (n:${label})
|
|
37
|
-
RETURN n.id AS id, n.name AS name, '${label}' AS label,
|
|
38
|
-
n.filePath AS filePath, n.content AS content,
|
|
39
|
-
n.startLine AS startLine, n.endLine AS endLine
|
|
35
|
+
query = `
|
|
36
|
+
MATCH (n:${label})
|
|
37
|
+
RETURN n.id AS id, n.name AS name, '${label}' AS label,
|
|
38
|
+
n.filePath AS filePath, n.content AS content,
|
|
39
|
+
n.startLine AS startLine, n.endLine AS endLine
|
|
40
40
|
`;
|
|
41
41
|
}
|
|
42
42
|
const rows = await executeQuery(query);
|
|
@@ -77,8 +77,8 @@ const batchInsertEmbeddings = async (executeWithReusedStatement, updates) => {
|
|
|
77
77
|
* Now indexes the separate CodeEmbedding table
|
|
78
78
|
*/
|
|
79
79
|
const createVectorIndex = async (executeQuery) => {
|
|
80
|
-
const cypher = `
|
|
81
|
-
CALL CREATE_VECTOR_INDEX('CodeEmbedding', 'code_embedding_idx', 'embedding', metric := 'cosine')
|
|
80
|
+
const cypher = `
|
|
81
|
+
CALL CREATE_VECTOR_INDEX('CodeEmbedding', 'code_embedding_idx', 'embedding', metric := 'cosine')
|
|
82
82
|
`;
|
|
83
83
|
try {
|
|
84
84
|
await executeQuery(cypher);
|
|
@@ -240,14 +240,14 @@ export const semanticSearch = async (executeQuery, query, k = 10, maxDistance =
|
|
|
240
240
|
const queryVec = embeddingToArray(queryEmbedding);
|
|
241
241
|
const queryVecStr = `[${queryVec.join(',')}]`;
|
|
242
242
|
// Query the vector index on CodeEmbedding to get nodeIds and distances
|
|
243
|
-
const vectorQuery = `
|
|
244
|
-
CALL QUERY_VECTOR_INDEX('CodeEmbedding', 'code_embedding_idx',
|
|
245
|
-
CAST(${queryVecStr} AS FLOAT[384]), ${k})
|
|
246
|
-
YIELD node AS emb, distance
|
|
247
|
-
WITH emb, distance
|
|
248
|
-
WHERE distance < ${maxDistance}
|
|
249
|
-
RETURN emb.nodeId AS nodeId, distance
|
|
250
|
-
ORDER BY distance
|
|
243
|
+
const vectorQuery = `
|
|
244
|
+
CALL QUERY_VECTOR_INDEX('CodeEmbedding', 'code_embedding_idx',
|
|
245
|
+
CAST(${queryVecStr} AS FLOAT[384]), ${k})
|
|
246
|
+
YIELD node AS emb, distance
|
|
247
|
+
WITH emb, distance
|
|
248
|
+
WHERE distance < ${maxDistance}
|
|
249
|
+
RETURN emb.nodeId AS nodeId, distance
|
|
250
|
+
ORDER BY distance
|
|
251
251
|
`;
|
|
252
252
|
const embResults = await executeQuery(vectorQuery);
|
|
253
253
|
if (embResults.length === 0) {
|
|
@@ -266,16 +266,16 @@ export const semanticSearch = async (executeQuery, query, k = 10, maxDistance =
|
|
|
266
266
|
try {
|
|
267
267
|
let nodeQuery;
|
|
268
268
|
if (label === 'File') {
|
|
269
|
-
nodeQuery = `
|
|
270
|
-
MATCH (n:File {id: '${nodeId.replace(/'/g, "''")}'})
|
|
271
|
-
RETURN n.name AS name, n.filePath AS filePath
|
|
269
|
+
nodeQuery = `
|
|
270
|
+
MATCH (n:File {id: '${nodeId.replace(/'/g, "''")}'})
|
|
271
|
+
RETURN n.name AS name, n.filePath AS filePath
|
|
272
272
|
`;
|
|
273
273
|
}
|
|
274
274
|
else {
|
|
275
|
-
nodeQuery = `
|
|
276
|
-
MATCH (n:${label} {id: '${nodeId.replace(/'/g, "''")}'})
|
|
277
|
-
RETURN n.name AS name, n.filePath AS filePath,
|
|
278
|
-
n.startLine AS startLine, n.endLine AS endLine
|
|
275
|
+
nodeQuery = `
|
|
276
|
+
MATCH (n:${label} {id: '${nodeId.replace(/'/g, "''")}'})
|
|
277
|
+
RETURN n.name AS name, n.filePath AS filePath,
|
|
278
|
+
n.startLine AS startLine, n.endLine AS endLine
|
|
279
279
|
`;
|
|
280
280
|
}
|
|
281
281
|
const nodeRows = await executeQuery(nodeQuery);
|
|
@@ -1,9 +1,10 @@
|
|
|
1
1
|
import { LRUCache } from 'lru-cache';
|
|
2
2
|
export const createASTCache = (maxSize = 50) => {
|
|
3
|
+
const effectiveMax = Math.max(maxSize, 1);
|
|
3
4
|
// Initialize the cache with a 'dispose' handler
|
|
4
5
|
// This is the magic: When an item is evicted (dropped), this runs automatically.
|
|
5
6
|
const cache = new LRUCache({
|
|
6
|
-
max:
|
|
7
|
+
max: effectiveMax,
|
|
7
8
|
dispose: (tree) => {
|
|
8
9
|
try {
|
|
9
10
|
// NOTE: web-tree-sitter has tree.delete(); native tree-sitter trees are GC-managed.
|
|
@@ -28,7 +29,7 @@ export const createASTCache = (maxSize = 50) => {
|
|
|
28
29
|
},
|
|
29
30
|
stats: () => ({
|
|
30
31
|
size: cache.size,
|
|
31
|
-
maxSize:
|
|
32
|
+
maxSize: effectiveMax
|
|
32
33
|
})
|
|
33
34
|
};
|
|
34
35
|
};
|
|
@@ -13,12 +13,12 @@ const buildEnrichmentPrompt = (members, heuristicLabel) => {
|
|
|
13
13
|
const memberList = limitedMembers
|
|
14
14
|
.map(m => `${m.name} (${m.type})`)
|
|
15
15
|
.join(', ');
|
|
16
|
-
return `Analyze this code cluster and provide a semantic name and short description.
|
|
17
|
-
|
|
18
|
-
Heuristic: "${heuristicLabel}"
|
|
19
|
-
Members: ${memberList}${members.length > 20 ? ` (+${members.length - 20} more)` : ''}
|
|
20
|
-
|
|
21
|
-
Reply with JSON only:
|
|
16
|
+
return `Analyze this code cluster and provide a semantic name and short description.
|
|
17
|
+
|
|
18
|
+
Heuristic: "${heuristicLabel}"
|
|
19
|
+
Members: ${memberList}${members.length > 20 ? ` (+${members.length - 20} more)` : ''}
|
|
20
|
+
|
|
21
|
+
Reply with JSON only:
|
|
22
22
|
{"name": "2-4 word semantic name", "description": "One sentence describing purpose"}`;
|
|
23
23
|
};
|
|
24
24
|
// ============================================================================
|
|
@@ -115,18 +115,18 @@ export const enrichClustersBatch = async (communities, memberMap, llmClient, bat
|
|
|
115
115
|
const memberList = limitedMembers
|
|
116
116
|
.map(m => `${m.name} (${m.type})`)
|
|
117
117
|
.join(', ');
|
|
118
|
-
return `Cluster ${idx + 1} (id: ${community.id}):
|
|
119
|
-
Heuristic: "${community.heuristicLabel}"
|
|
118
|
+
return `Cluster ${idx + 1} (id: ${community.id}):
|
|
119
|
+
Heuristic: "${community.heuristicLabel}"
|
|
120
120
|
Members: ${memberList}`;
|
|
121
121
|
}).join('\n\n');
|
|
122
|
-
const prompt = `Analyze these code clusters and generate semantic names, keywords, and descriptions.
|
|
123
|
-
|
|
124
|
-
${batchPrompt}
|
|
125
|
-
|
|
126
|
-
Output JSON array:
|
|
127
|
-
[
|
|
128
|
-
{"id": "comm_X", "name": "...", "keywords": [...], "description": "..."},
|
|
129
|
-
...
|
|
122
|
+
const prompt = `Analyze these code clusters and generate semantic names, keywords, and descriptions.
|
|
123
|
+
|
|
124
|
+
${batchPrompt}
|
|
125
|
+
|
|
126
|
+
Output JSON array:
|
|
127
|
+
[
|
|
128
|
+
{"id": "comm_X", "name": "...", "keywords": [...], "description": "..."},
|
|
129
|
+
...
|
|
130
130
|
]`;
|
|
131
131
|
try {
|
|
132
132
|
const response = await llmClient.generate(prompt);
|
|
@@ -87,6 +87,14 @@ export const runPipelineFromRepo = async (repoPath, onProgress) => {
|
|
|
87
87
|
console.warn(`Skipping ${count} ${lang} file(s) — ${lang} parser not available (native binding may not have built). Try: npm rebuild tree-sitter-${lang}`);
|
|
88
88
|
}
|
|
89
89
|
const totalParseable = parseableScanned.length;
|
|
90
|
+
if (totalParseable === 0) {
|
|
91
|
+
onProgress({
|
|
92
|
+
phase: 'parsing',
|
|
93
|
+
percent: 82,
|
|
94
|
+
message: 'No parseable files found — skipping parsing phase',
|
|
95
|
+
stats: { filesProcessed: 0, totalFiles: 0, nodesCreated: graph.nodeCount },
|
|
96
|
+
});
|
|
97
|
+
}
|
|
90
98
|
// Build byte-budget chunks
|
|
91
99
|
const chunks = [];
|
|
92
100
|
let currentChunk = [];
|