gitnexus 1.1.9 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +50 -59
- package/dist/cli/analyze.js +114 -32
- package/dist/cli/eval-server.d.ts +30 -0
- package/dist/cli/eval-server.js +372 -0
- package/dist/cli/index.js +51 -1
- package/dist/cli/mcp.js +9 -0
- package/dist/cli/setup.js +44 -7
- package/dist/cli/tool.d.ts +37 -0
- package/dist/cli/tool.js +91 -0
- package/dist/cli/wiki.d.ts +13 -0
- package/dist/cli/wiki.js +199 -0
- package/dist/core/embeddings/embedder.d.ts +2 -2
- package/dist/core/embeddings/embedder.js +10 -10
- package/dist/core/embeddings/embedding-pipeline.d.ts +2 -1
- package/dist/core/embeddings/embedding-pipeline.js +12 -4
- package/dist/core/embeddings/types.d.ts +2 -2
- package/dist/core/ingestion/call-processor.d.ts +7 -0
- package/dist/core/ingestion/call-processor.js +61 -23
- package/dist/core/ingestion/community-processor.js +34 -26
- package/dist/core/ingestion/filesystem-walker.js +15 -10
- package/dist/core/ingestion/heritage-processor.d.ts +6 -0
- package/dist/core/ingestion/heritage-processor.js +68 -5
- package/dist/core/ingestion/import-processor.d.ts +22 -0
- package/dist/core/ingestion/import-processor.js +214 -19
- package/dist/core/ingestion/parsing-processor.d.ts +8 -1
- package/dist/core/ingestion/parsing-processor.js +66 -25
- package/dist/core/ingestion/pipeline.js +103 -39
- package/dist/core/ingestion/workers/parse-worker.d.ts +58 -0
- package/dist/core/ingestion/workers/parse-worker.js +451 -0
- package/dist/core/ingestion/workers/worker-pool.d.ts +22 -0
- package/dist/core/ingestion/workers/worker-pool.js +65 -0
- package/dist/core/kuzu/kuzu-adapter.d.ts +15 -1
- package/dist/core/kuzu/kuzu-adapter.js +177 -67
- package/dist/core/kuzu/schema.d.ts +1 -1
- package/dist/core/kuzu/schema.js +3 -0
- package/dist/core/wiki/generator.d.ts +96 -0
- package/dist/core/wiki/generator.js +674 -0
- package/dist/core/wiki/graph-queries.d.ts +80 -0
- package/dist/core/wiki/graph-queries.js +238 -0
- package/dist/core/wiki/html-viewer.d.ts +10 -0
- package/dist/core/wiki/html-viewer.js +297 -0
- package/dist/core/wiki/llm-client.d.ts +36 -0
- package/dist/core/wiki/llm-client.js +111 -0
- package/dist/core/wiki/prompts.d.ts +53 -0
- package/dist/core/wiki/prompts.js +174 -0
- package/dist/mcp/core/embedder.js +4 -2
- package/dist/mcp/core/kuzu-adapter.d.ts +2 -1
- package/dist/mcp/core/kuzu-adapter.js +35 -15
- package/dist/mcp/local/local-backend.js +9 -2
- package/dist/mcp/server.js +1 -1
- package/dist/storage/git.d.ts +0 -1
- package/dist/storage/git.js +1 -8
- package/dist/storage/repo-manager.d.ts +17 -0
- package/dist/storage/repo-manager.js +26 -0
- package/package.json +1 -1
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LLM Client for Wiki Generation
|
|
3
|
+
*
|
|
4
|
+
* OpenAI-compatible API client using native fetch.
|
|
5
|
+
* Supports OpenAI, Azure, LiteLLM, Ollama, and any OpenAI-compatible endpoint.
|
|
6
|
+
*
|
|
7
|
+
* Config priority: CLI flags > env vars > defaults
|
|
8
|
+
*/
|
|
9
|
+
/**
|
|
10
|
+
* Resolve LLM configuration from env vars, saved config, and optional overrides.
|
|
11
|
+
* Priority: overrides (CLI flags) > env vars > ~/.gitnexus/config.json > error
|
|
12
|
+
*
|
|
13
|
+
* If no API key is found, returns config with empty apiKey (caller should handle).
|
|
14
|
+
*/
|
|
15
|
+
export async function resolveLLMConfig(overrides) {
|
|
16
|
+
const { loadCLIConfig } = await import('../../storage/repo-manager.js');
|
|
17
|
+
const savedConfig = await loadCLIConfig();
|
|
18
|
+
const apiKey = overrides?.apiKey
|
|
19
|
+
|| process.env.GITNEXUS_API_KEY
|
|
20
|
+
|| process.env.OPENAI_API_KEY
|
|
21
|
+
|| savedConfig.apiKey
|
|
22
|
+
|| '';
|
|
23
|
+
return {
|
|
24
|
+
apiKey,
|
|
25
|
+
baseUrl: overrides?.baseUrl
|
|
26
|
+
|| process.env.GITNEXUS_LLM_BASE_URL
|
|
27
|
+
|| savedConfig.baseUrl
|
|
28
|
+
|| 'https://api.openai.com/v1',
|
|
29
|
+
model: overrides?.model
|
|
30
|
+
|| process.env.GITNEXUS_MODEL
|
|
31
|
+
|| savedConfig.model
|
|
32
|
+
|| 'gpt-4o-mini',
|
|
33
|
+
maxTokens: overrides?.maxTokens ?? 16_384,
|
|
34
|
+
temperature: overrides?.temperature ?? 0,
|
|
35
|
+
};
|
|
36
|
+
}
|
|
37
|
+
/**
 * Estimate token count from text (rough heuristic: ~4 chars per token).
 */
export function estimateTokens(text) {
    // Round up: a trailing partial 4-char chunk still costs a whole token.
    const CHARS_PER_TOKEN = 4;
    return Math.ceil(text.length / CHARS_PER_TOKEN);
}
|
43
|
+
/**
 * Call an OpenAI-compatible LLM API.
 * Retries once on transient failures (5xx, network errors).
 *
 * @param prompt       User message content.
 * @param config       Resolved config: apiKey, baseUrl, model, maxTokens, temperature.
 * @param systemPrompt Optional system message prepended before the user message.
 * @returns Promise resolving to { content, promptTokens, completionTokens };
 *          token counts come from the provider's `usage` block and may be undefined.
 * @throws  Error on non-retryable HTTP errors, empty responses, or once the
 *          single retry is exhausted.
 */
export async function callLLM(prompt, config, systemPrompt) {
    const messages = [];
    if (systemPrompt) {
        messages.push({ role: 'system', content: systemPrompt });
    }
    messages.push({ role: 'user', content: prompt });
    // Strip trailing slashes so base URLs with or without '/' both work.
    const url = `${config.baseUrl.replace(/\/+$/, '')}/chat/completions`;
    const body = {
        model: config.model,
        messages,
        max_tokens: config.maxTokens,
        temperature: config.temperature,
    };
    let lastError = null;
    // At most two attempts: the original call plus one retry on transient failure.
    for (let attempt = 0; attempt < 2; attempt++) {
        try {
            const response = await fetch(url, {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json',
                    'Authorization': `Bearer ${config.apiKey}`,
                },
                body: JSON.stringify(body),
            });
            if (!response.ok) {
                // Body text is best-effort — reading it may itself fail.
                const errorText = await response.text().catch(() => 'unknown error');
                // Rate limit — wait and retry
                if (response.status === 429 && attempt === 0) {
                    // Honor the server's Retry-After header (seconds); default 5s.
                    const retryAfter = parseInt(response.headers.get('retry-after') || '5', 10);
                    await sleep(retryAfter * 1000);
                    continue;
                }
                // Server error — retry once
                if (response.status >= 500 && attempt === 0) {
                    await sleep(2000);
                    continue;
                }
                // Non-retryable (4xx other than 429, or retry exhausted);
                // cap the body excerpt so error messages stay readable.
                throw new Error(`LLM API error (${response.status}): ${errorText.slice(0, 500)}`);
            }
            const json = await response.json();
            const choice = json.choices?.[0];
            if (!choice?.message?.content) {
                throw new Error('LLM returned empty response');
            }
            return {
                content: choice.message.content,
                promptTokens: json.usage?.prompt_tokens,
                completionTokens: json.usage?.completion_tokens,
            };
        }
        catch (err) {
            lastError = err;
            // Network error — retry once
            // (the `includes('fetch')` check catches undici's generic
            // "fetch failed" TypeError, which carries no errno code)
            if (attempt === 0 && (err.code === 'ECONNREFUSED' || err.code === 'ETIMEDOUT' || err.message?.includes('fetch'))) {
                await sleep(3000);
                continue;
            }
            throw err;
        }
    }
    // Defensive: only reachable if every attempt ended in `continue`.
    throw lastError || new Error('LLM call failed after retries');
}
|
109
|
+
/** Promise-based delay helper used between retry attempts. */
function sleep(ms) {
    return new Promise((resolve) => {
        setTimeout(resolve, ms);
    });
}
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LLM Prompt Templates for Wiki Generation
|
|
3
|
+
*
|
|
4
|
+
* All prompts produce deterministic, source-grounded documentation.
|
|
5
|
+
* Templates use {{PLACEHOLDER}} substitution.
|
|
6
|
+
*/
|
|
7
|
+
export declare const GROUPING_SYSTEM_PROMPT = "You are a documentation architect. Given a list of source files with their exported symbols, group them into logical documentation modules.\n\nRules:\n- Each module should represent a cohesive feature, layer, or domain\n- Every file must appear in exactly one module\n- Module names should be human-readable (e.g. \"Authentication\", \"Database Layer\", \"API Routes\")\n- Aim for 5-15 modules for a typical project. Fewer for small projects, more for large ones\n- Group by functionality, not by file type or directory structure alone\n- Do NOT create modules for tests, configs, or non-source files";
|
|
8
|
+
export declare const GROUPING_USER_PROMPT = "Group these source files into documentation modules.\n\n**Files and their exports:**\n{{FILE_LIST}}\n\n**Directory structure:**\n{{DIRECTORY_TREE}}\n\nRespond with ONLY a JSON object mapping module names to file path arrays. No markdown, no explanation.\nExample format:\n{\n \"Authentication\": [\"src/auth/login.ts\", \"src/auth/session.ts\"],\n \"Database\": [\"src/db/connection.ts\", \"src/db/models.ts\"]\n}";
|
|
9
|
+
export declare const MODULE_SYSTEM_PROMPT = "You are a technical documentation writer. Write clear, developer-focused documentation for a code module.\n\nRules:\n- Reference actual function names, class names, and code patterns \u2014 do NOT invent APIs\n- Use the call graph and execution flow data for accuracy, but do NOT mechanically list every edge\n- Include Mermaid diagrams only when they genuinely help understanding. Keep them small (5-10 nodes max)\n- Structure the document however makes sense for this module \u2014 there is no mandatory format\n- Write for a developer who needs to understand and contribute to this code";
|
|
10
|
+
export declare const MODULE_USER_PROMPT = "Write documentation for the **{{MODULE_NAME}}** module.\n\n## Source Code\n\n{{SOURCE_CODE}}\n\n## Call Graph & Execution Flows (reference for accuracy)\n\nInternal calls: {{INTRA_CALLS}}\nOutgoing calls: {{OUTGOING_CALLS}}\nIncoming calls: {{INCOMING_CALLS}}\nExecution flows: {{PROCESSES}}\n\n---\n\nWrite comprehensive documentation for this module. Cover its purpose, how it works, its key components, and how it connects to the rest of the codebase. Use whatever structure best fits this module \u2014 you decide the sections and headings. Include a Mermaid diagram only if it genuinely clarifies the architecture.";
|
|
11
|
+
export declare const PARENT_SYSTEM_PROMPT = "You are a technical documentation writer. Write a summary page for a module that contains sub-modules. Synthesize the children's documentation \u2014 do not re-read source code.\n\nRules:\n- Reference actual components from the child modules\n- Focus on how the sub-modules work together, not repeating their individual docs\n- Keep it concise \u2014 the reader can click through to child pages for detail\n- Include a Mermaid diagram only if it genuinely clarifies how the sub-modules relate";
|
|
12
|
+
export declare const PARENT_USER_PROMPT = "Write documentation for the **{{MODULE_NAME}}** module, which contains these sub-modules:\n\n{{CHILDREN_DOCS}}\n\nCross-module calls: {{CROSS_MODULE_CALLS}}\nShared execution flows: {{CROSS_PROCESSES}}\n\n---\n\nWrite a concise overview of this module group. Explain its purpose, how the sub-modules fit together, and the key workflows that span them. Link to sub-module pages (e.g. `[Sub-module Name](sub-module-slug.md)`) rather than repeating their content. Use whatever structure fits best.";
|
|
13
|
+
export declare const OVERVIEW_SYSTEM_PROMPT = "You are a technical documentation writer. Write the top-level overview page for a repository wiki. This is the first page a new developer sees.\n\nRules:\n- Be clear and welcoming \u2014 this is the entry point to the entire codebase\n- Reference actual module names so readers can navigate to their docs\n- Include a high-level Mermaid architecture diagram showing only the most important modules and their relationships (max 10 nodes). A new dev should grasp it in 10 seconds\n- Do NOT create module index tables or list every module with descriptions \u2014 just link to module pages naturally within the text\n- Use the inter-module edges and execution flow data for accuracy, but do NOT dump them raw";
|
|
14
|
+
export declare const OVERVIEW_USER_PROMPT = "Write the overview page for this repository's wiki.\n\n## Project Info\n\n{{PROJECT_INFO}}\n\n## Module Summaries\n\n{{MODULE_SUMMARIES}}\n\n## Reference Data (for accuracy \u2014 do not reproduce verbatim)\n\nInter-module call edges: {{MODULE_EDGES}}\nKey system flows: {{TOP_PROCESSES}}\n\n---\n\nWrite a clear overview of this project: what it does, how it's architected, and the key end-to-end flows. Include a simple Mermaid architecture diagram (max 10 nodes, big-picture only). Link to module pages (e.g. `[Module Name](module-slug.md)`) naturally in the text rather than listing them in a table. If project config was provided, include brief setup instructions. Structure the page however reads best.";
|
|
15
|
+
/**
 * Replace {{PLACEHOLDER}} tokens in a template string.
 *
 * @param template Template containing zero or more `{{KEY}}` tokens.
 * @param vars     Map of placeholder name to replacement text; every
 *                 occurrence of each `{{KEY}}` is substituted.
 * @returns The template with all matching placeholders replaced.
 */
export declare function fillTemplate(template: string, vars: Record<string, string>): string;
/**
 * Format file list with exports for the grouping prompt.
 *
 * @param files One entry per source file with its exported symbols
 *              (name + type).
 * @returns One `- path: exports` line per file, newline-joined.
 */
export declare function formatFileListForGrouping(files: Array<{
    filePath: string;
    symbols: Array<{
        name: string;
        type: string;
    }>;
}>): string;
/**
 * Build a directory tree string from file paths.
 *
 * @param filePaths Repo-relative file paths (either path separator).
 * @returns Sorted directory listing, or a flat-structure marker when
 *          no directories exist.
 */
export declare function formatDirectoryTree(filePaths: string[]): string;
/**
 * Format call edges as readable text.
 *
 * @param edges Caller/callee pairs with their source files.
 * @returns Human-readable edge list (capped), or 'None' when empty.
 */
export declare function formatCallEdges(edges: Array<{
    fromFile: string;
    fromName: string;
    toFile: string;
    toName: string;
}>): string;
/**
 * Format process traces as readable text.
 *
 * @param processes Labeled execution flows, each with ordered steps.
 * @returns Markdown-ish trace blocks, or a no-flows message when empty.
 */
export declare function formatProcesses(processes: Array<{
    label: string;
    type: string;
    steps: Array<{
        step: number;
        name: string;
        filePath: string;
    }>;
}>): string;
|
@@ -0,0 +1,174 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LLM Prompt Templates for Wiki Generation
|
|
3
|
+
*
|
|
4
|
+
* All prompts produce deterministic, source-grounded documentation.
|
|
5
|
+
* Templates use {{PLACEHOLDER}} substitution.
|
|
6
|
+
*/
|
|
7
|
+
// ─── Grouping Prompt ──────────────────────────────────────────────────
// System prompt: constrains how source files are clustered into modules.
export const GROUPING_SYSTEM_PROMPT = `You are a documentation architect. Given a list of source files with their exported symbols, group them into logical documentation modules.

Rules:
- Each module should represent a cohesive feature, layer, or domain
- Every file must appear in exactly one module
- Module names should be human-readable (e.g. "Authentication", "Database Layer", "API Routes")
- Aim for 5-15 modules for a typical project. Fewer for small projects, more for large ones
- Group by functionality, not by file type or directory structure alone
- Do NOT create modules for tests, configs, or non-source files`;
// User prompt: expects {{FILE_LIST}} and {{DIRECTORY_TREE}}; response must be bare JSON.
export const GROUPING_USER_PROMPT = `Group these source files into documentation modules.

**Files and their exports:**
{{FILE_LIST}}

**Directory structure:**
{{DIRECTORY_TREE}}

Respond with ONLY a JSON object mapping module names to file path arrays. No markdown, no explanation.
Example format:
{
 "Authentication": ["src/auth/login.ts", "src/auth/session.ts"],
 "Database": ["src/db/connection.ts", "src/db/models.ts"]
}`;
// ─── Leaf Module Prompt ───────────────────────────────────────────────
// System prompt for documenting a single (leaf) module from its source.
export const MODULE_SYSTEM_PROMPT = `You are a technical documentation writer. Write clear, developer-focused documentation for a code module.

Rules:
- Reference actual function names, class names, and code patterns — do NOT invent APIs
- Use the call graph and execution flow data for accuracy, but do NOT mechanically list every edge
- Include Mermaid diagrams only when they genuinely help understanding. Keep them small (5-10 nodes max)
- Structure the document however makes sense for this module — there is no mandatory format
- Write for a developer who needs to understand and contribute to this code`;
// User prompt: expects {{MODULE_NAME}}, {{SOURCE_CODE}}, {{INTRA_CALLS}},
// {{OUTGOING_CALLS}}, {{INCOMING_CALLS}}, and {{PROCESSES}}.
export const MODULE_USER_PROMPT = `Write documentation for the **{{MODULE_NAME}}** module.

## Source Code

{{SOURCE_CODE}}

## Call Graph & Execution Flows (reference for accuracy)

Internal calls: {{INTRA_CALLS}}
Outgoing calls: {{OUTGOING_CALLS}}
Incoming calls: {{INCOMING_CALLS}}
Execution flows: {{PROCESSES}}

---

Write comprehensive documentation for this module. Cover its purpose, how it works, its key components, and how it connects to the rest of the codebase. Use whatever structure best fits this module — you decide the sections and headings. Include a Mermaid diagram only if it genuinely clarifies the architecture.`;
// ─── Parent Module Prompt ─────────────────────────────────────────────
// System prompt for summarizing a module that aggregates sub-modules;
// works from the children's docs, not from source code.
export const PARENT_SYSTEM_PROMPT = `You are a technical documentation writer. Write a summary page for a module that contains sub-modules. Synthesize the children's documentation — do not re-read source code.

Rules:
- Reference actual components from the child modules
- Focus on how the sub-modules work together, not repeating their individual docs
- Keep it concise — the reader can click through to child pages for detail
- Include a Mermaid diagram only if it genuinely clarifies how the sub-modules relate`;
// User prompt: expects {{MODULE_NAME}}, {{CHILDREN_DOCS}},
// {{CROSS_MODULE_CALLS}}, and {{CROSS_PROCESSES}}.
export const PARENT_USER_PROMPT = `Write documentation for the **{{MODULE_NAME}}** module, which contains these sub-modules:

{{CHILDREN_DOCS}}

Cross-module calls: {{CROSS_MODULE_CALLS}}
Shared execution flows: {{CROSS_PROCESSES}}

---

Write a concise overview of this module group. Explain its purpose, how the sub-modules fit together, and the key workflows that span them. Link to sub-module pages (e.g. \`[Sub-module Name](sub-module-slug.md)\`) rather than repeating their content. Use whatever structure fits best.`;
// ─── Overview Prompt ──────────────────────────────────────────────────
// System prompt for the wiki's top-level landing page.
export const OVERVIEW_SYSTEM_PROMPT = `You are a technical documentation writer. Write the top-level overview page for a repository wiki. This is the first page a new developer sees.

Rules:
- Be clear and welcoming — this is the entry point to the entire codebase
- Reference actual module names so readers can navigate to their docs
- Include a high-level Mermaid architecture diagram showing only the most important modules and their relationships (max 10 nodes). A new dev should grasp it in 10 seconds
- Do NOT create module index tables or list every module with descriptions — just link to module pages naturally within the text
- Use the inter-module edges and execution flow data for accuracy, but do NOT dump them raw`;
// User prompt: expects {{PROJECT_INFO}}, {{MODULE_SUMMARIES}},
// {{MODULE_EDGES}}, and {{TOP_PROCESSES}}.
export const OVERVIEW_USER_PROMPT = `Write the overview page for this repository's wiki.

## Project Info

{{PROJECT_INFO}}

## Module Summaries

{{MODULE_SUMMARIES}}

## Reference Data (for accuracy — do not reproduce verbatim)

Inter-module call edges: {{MODULE_EDGES}}
Key system flows: {{TOP_PROCESSES}}

---

Write a clear overview of this project: what it does, how it's architected, and the key end-to-end flows. Include a simple Mermaid architecture diagram (max 10 nodes, big-picture only). Link to module pages (e.g. \`[Module Name](module-slug.md)\`) naturally in the text rather than listing them in a table. If project config was provided, include brief setup instructions. Structure the page however reads best.`;
|
101
|
+
// ─── Template Substitution Helper ─────────────────────────────────────
/**
 * Replace {{PLACEHOLDER}} tokens in a template string.
 * Every occurrence of each `{{KEY}}` in `vars` is substituted; unknown
 * placeholders are left untouched.
 */
export function fillTemplate(template, vars) {
    return Object.entries(vars).reduce(
        (text, [name, replacement]) => text.replaceAll(`{{${name}}}`, replacement),
        template,
    );
}
|
|
112
|
+
// ─── Formatting Helpers ───────────────────────────────────────────────
/**
 * Format file list with exports for the grouping prompt.
 * Each file becomes one `- path: exports` bullet line.
 */
export function formatFileListForGrouping(files) {
    const lines = [];
    for (const file of files) {
        const exportList = file.symbols.length === 0
            ? 'no exports'
            : file.symbols.map(sym => `${sym.name} (${sym.type})`).join(', ');
        lines.push(`- ${file.filePath}: ${exportList}`);
    }
    return lines.join('\n');
}
|
|
126
|
+
/**
 * Build a directory tree string from file paths.
 * Lists every ancestor directory (sorted, capped at 50 entries); files
 * with no directory component contribute nothing.
 */
export function formatDirectoryTree(filePaths) {
    const dirs = new Set();
    for (const filePath of filePaths) {
        // Normalize Windows separators before splitting into segments.
        const segments = filePath.replace(/\\/g, '/').split('/');
        for (let depth = 1; depth < segments.length; depth++) {
            dirs.add(segments.slice(0, depth).join('/'));
        }
    }
    if (dirs.size === 0) {
        return '(flat structure)';
    }
    const sorted = [...dirs].sort();
    const shown = sorted.slice(0, 50).join('\n');
    return sorted.length > 50
        ? `${shown}\n... and ${sorted.length - 50} more directories`
        : shown;
}
|
|
142
|
+
/**
 * Format call edges as readable text.
 * Output is capped at the first 30 edges; returns 'None' for an empty list.
 */
export function formatCallEdges(edges) {
    if (!edges.length) {
        return 'None';
    }
    const rendered = [];
    for (const edge of edges.slice(0, 30)) {
        rendered.push(`${edge.fromName} (${shortPath(edge.fromFile)}) → ${edge.toName} (${shortPath(edge.toFile)})`);
    }
    return rendered.join('\n');
}
/**
 * Format process traces as readable text.
 * Each process renders as a bold label followed by its numbered steps.
 */
export function formatProcesses(processes) {
    if (!processes.length) {
        return 'No execution flows detected for this module.';
    }
    const blocks = processes.map(proc => {
        const stepLines = proc.steps
            .map(step => `  ${step.step}. ${step.name} (${shortPath(step.filePath)})`)
            .join('\n');
        return `**${proc.label}** (${proc.type}):\n${stepLines}`;
    });
    return blocks.join('\n\n');
}
/**
 * Shorten a file path for readability: keep at most the last 3 segments.
 * Short paths are returned unchanged (including any backslashes).
 */
function shortPath(fp) {
    const segments = fp.replace(/\\/g, '/').split('/');
    if (segments.length <= 3) {
        return fp;
    }
    return segments.slice(-3).join('/');
}
|
|
@@ -27,8 +27,10 @@ export const initEmbedder = async () => {
|
|
|
27
27
|
try {
|
|
28
28
|
env.allowLocalModels = false;
|
|
29
29
|
console.error('GitNexus: Loading embedding model (first search may take a moment)...');
|
|
30
|
-
// Try
|
|
31
|
-
const
|
|
30
|
+
// Try GPU first (DirectML on Windows, CUDA on Linux), fall back to CPU
|
|
31
|
+
const isWindows = process.platform === 'win32';
|
|
32
|
+
const gpuDevice = isWindows ? 'dml' : 'cuda';
|
|
33
|
+
const devicesToTry = [gpuDevice, 'cpu'];
|
|
32
34
|
for (const device of devicesToTry) {
|
|
33
35
|
try {
|
|
34
36
|
// Silence stdout during model load — ONNX Runtime and transformers.js
|
|
@@ -6,7 +6,8 @@
|
|
|
6
6
|
* after idle timeout or when pool exceeds max size (LRU).
|
|
7
7
|
*/
|
|
8
8
|
/**
|
|
9
|
-
* Initialize (or reuse) a connection for a specific repo
|
|
9
|
+
* Initialize (or reuse) a connection for a specific repo.
|
|
10
|
+
* Retries on lock errors (e.g., when `gitnexus analyze` is running).
|
|
10
11
|
*/
|
|
11
12
|
export declare const initKuzu: (repoId: string, dbPath: string) => Promise<void>;
|
|
12
13
|
/**
|
|
@@ -65,8 +65,11 @@ function closeOne(repoId) {
|
|
|
65
65
|
catch { }
|
|
66
66
|
pool.delete(repoId);
|
|
67
67
|
}
|
|
68
|
+
const LOCK_RETRY_ATTEMPTS = 3;
|
|
69
|
+
const LOCK_RETRY_DELAY_MS = 2000;
|
|
68
70
|
/**
|
|
69
|
-
* Initialize (or reuse) a connection for a specific repo
|
|
71
|
+
* Initialize (or reuse) a connection for a specific repo.
|
|
72
|
+
* Retries on lock errors (e.g., when `gitnexus analyze` is running).
|
|
70
73
|
*/
|
|
71
74
|
export const initKuzu = async (repoId, dbPath) => {
|
|
72
75
|
const existing = pool.get(repoId);
|
|
@@ -82,21 +85,38 @@ export const initKuzu = async (repoId, dbPath) => {
|
|
|
82
85
|
throw new Error(`KuzuDB not found at ${dbPath}. Run: gitnexus analyze`);
|
|
83
86
|
}
|
|
84
87
|
evictLRU();
|
|
85
|
-
//
|
|
86
|
-
//
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
let
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
88
|
+
// Open in read-only mode — MCP server never writes to the database.
|
|
89
|
+
// This allows multiple MCP server instances to read concurrently, and
|
|
90
|
+
// avoids lock conflicts when `gitnexus analyze` is writing.
|
|
91
|
+
let lastError = null;
|
|
92
|
+
for (let attempt = 1; attempt <= LOCK_RETRY_ATTEMPTS; attempt++) {
|
|
93
|
+
// Silence stdout during KuzuDB init — native module may write to stdout
|
|
94
|
+
// which corrupts the MCP stdio protocol.
|
|
95
|
+
const origWrite = process.stdout.write;
|
|
96
|
+
process.stdout.write = (() => true);
|
|
97
|
+
try {
|
|
98
|
+
const db = new kuzu.Database(dbPath, 0, // bufferManagerSize (default)
|
|
99
|
+
false, // enableCompression (default)
|
|
100
|
+
true);
|
|
101
|
+
const conn = new kuzu.Connection(db);
|
|
102
|
+
process.stdout.write = origWrite;
|
|
103
|
+
pool.set(repoId, { db, conn, lastUsed: Date.now(), dbPath });
|
|
104
|
+
ensureIdleTimer();
|
|
105
|
+
return;
|
|
106
|
+
}
|
|
107
|
+
catch (err) {
|
|
108
|
+
process.stdout.write = origWrite;
|
|
109
|
+
lastError = err instanceof Error ? err : new Error(String(err));
|
|
110
|
+
const isLockError = lastError.message.includes('Could not set lock')
|
|
111
|
+
|| lastError.message.includes('lock');
|
|
112
|
+
if (!isLockError || attempt === LOCK_RETRY_ATTEMPTS)
|
|
113
|
+
break;
|
|
114
|
+
// Wait before retrying — analyze may be mid-rebuild
|
|
115
|
+
await new Promise(resolve => setTimeout(resolve, LOCK_RETRY_DELAY_MS * attempt));
|
|
116
|
+
}
|
|
97
117
|
}
|
|
98
|
-
|
|
99
|
-
|
|
118
|
+
throw new Error(`KuzuDB unavailable for ${repoId}. Another process may be rebuilding the index. ` +
|
|
119
|
+
`Retry later. (${lastError?.message || 'unknown error'})`);
|
|
100
120
|
};
|
|
101
121
|
/**
|
|
102
122
|
* Execute a query on a specific repo's connection
|
|
@@ -139,8 +139,15 @@ export class LocalBackend {
|
|
|
139
139
|
const handle = this.repos.get(repoId);
|
|
140
140
|
if (!handle)
|
|
141
141
|
throw new Error(`Unknown repo: ${repoId}`);
|
|
142
|
-
|
|
143
|
-
|
|
142
|
+
try {
|
|
143
|
+
await initKuzu(repoId, handle.kuzuPath);
|
|
144
|
+
this.initializedRepos.add(repoId);
|
|
145
|
+
}
|
|
146
|
+
catch (err) {
|
|
147
|
+
// If lock error, mark as not initialized so next call retries
|
|
148
|
+
this.initializedRepos.delete(repoId);
|
|
149
|
+
throw err;
|
|
150
|
+
}
|
|
144
151
|
}
|
|
145
152
|
// ─── Public Getters ──────────────────────────────────────────────
|
|
146
153
|
/**
|
package/dist/mcp/server.js
CHANGED
package/dist/storage/git.d.ts
CHANGED
|
@@ -1,6 +1,5 @@
|
|
|
1
1
|
export declare const isGitRepo: (repoPath: string) => boolean;
|
|
2
2
|
export declare const getCurrentCommit: (repoPath: string) => string;
|
|
3
|
-
export declare const getStatusPorcelain: (repoPath: string) => string;
|
|
4
3
|
/**
|
|
5
4
|
* Find the git repository root from any path inside the repo
|
|
6
5
|
*/
|
package/dist/storage/git.js
CHANGED
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
import { execSync } from 'child_process';
|
|
2
|
+
// Git utilities for repository detection, commit tracking, and diff analysis
|
|
2
3
|
export const isGitRepo = (repoPath) => {
|
|
3
4
|
try {
|
|
4
5
|
execSync('git rev-parse --is-inside-work-tree', { cwd: repoPath, stdio: 'ignore' });
|
|
@@ -16,14 +17,6 @@ export const getCurrentCommit = (repoPath) => {
|
|
|
16
17
|
return '';
|
|
17
18
|
}
|
|
18
19
|
};
|
|
19
|
-
export const getStatusPorcelain = (repoPath) => {
|
|
20
|
-
try {
|
|
21
|
-
return execSync('git status --porcelain', { cwd: repoPath }).toString();
|
|
22
|
-
}
|
|
23
|
-
catch {
|
|
24
|
-
return '';
|
|
25
|
-
}
|
|
26
|
-
};
|
|
27
20
|
/**
|
|
28
21
|
* Find the git repository root from any path inside the repo
|
|
29
22
|
*/
|
|
@@ -101,3 +101,20 @@ export declare const unregisterRepo: (repoPath: string) => Promise<void>;
|
|
|
101
101
|
export declare const listRegisteredRepos: (opts?: {
|
|
102
102
|
validate?: boolean;
|
|
103
103
|
}) => Promise<RegistryEntry[]>;
|
|
104
|
+
export interface CLIConfig {
|
|
105
|
+
apiKey?: string;
|
|
106
|
+
model?: string;
|
|
107
|
+
baseUrl?: string;
|
|
108
|
+
}
|
|
109
|
+
/**
|
|
110
|
+
* Get the path to the global CLI config file
|
|
111
|
+
*/
|
|
112
|
+
export declare const getGlobalConfigPath: () => string;
|
|
113
|
+
/**
|
|
114
|
+
* Load CLI config from ~/.gitnexus/config.json
|
|
115
|
+
*/
|
|
116
|
+
export declare const loadCLIConfig: () => Promise<CLIConfig>;
|
|
117
|
+
/**
|
|
118
|
+
* Save CLI config to ~/.gitnexus/config.json
|
|
119
|
+
*/
|
|
120
|
+
export declare const saveCLIConfig: (config: CLIConfig) => Promise<void>;
|
|
@@ -203,3 +203,29 @@ export const listRegisteredRepos = async (opts) => {
|
|
|
203
203
|
}
|
|
204
204
|
return valid;
|
|
205
205
|
};
|
|
206
|
+
/**
 * Get the path to the global CLI config file
 * (config.json inside the gitnexus global directory).
 */
export const getGlobalConfigPath = () => path.join(getGlobalDir(), 'config.json');
|
|
212
|
+
/**
 * Load CLI config from ~/.gitnexus/config.json
 * Best-effort: returns an empty config when the file is missing,
 * unreadable, or contains invalid JSON.
 */
export const loadCLIConfig = async () => {
    try {
        const contents = await fs.readFile(getGlobalConfigPath(), 'utf-8');
        return JSON.parse(contents);
    }
    catch {
        // Missing or corrupt config is treated as "no saved settings".
        return {};
    }
};
|
|
224
|
+
/**
 * Save CLI config to ~/.gitnexus/config.json
 * Creates the global directory if needed; writes pretty-printed JSON.
 */
export const saveCLIConfig = async (config) => {
    const configDir = getGlobalDir();
    await fs.mkdir(configDir, { recursive: true });
    const serialized = JSON.stringify(config, null, 2);
    await fs.writeFile(getGlobalConfigPath(), serialized, 'utf-8');
};
|
package/package.json
CHANGED