@khiem_enhance/ai-doc-agent 0.1.0 → 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli.js +17 -1
- package/dist/commands/generate.js +31 -22
- package/dist/llm/openaiClient.js +56 -10
- package/package.json +6 -2
package/dist/cli.js
CHANGED

@@ -1,6 +1,7 @@
 #!/usr/bin/env node
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
+require("dotenv/config");
 const commander_1 = require("commander");
 const generate_1 = require("./commands/generate");
 const program = new commander_1.Command();
@@ -12,5 +13,20 @@ program
     .command("generate")
     .option("--since <commit>", "Only analyze changes since commit")
     .option("--output <dir>", "Docs output directory", "docs")
-    .
+    .option("--only <part>", "architecture|modules|all", "all")
+    .option("--max-files <n>", "Max files included per LLM request", "8")
+    .option("--max-chars <n>", "Max characters included per LLM request", "60000")
+    .action(async (opts) => {
+    // normalize options
+    const only = String(opts.only ?? "all");
+    const maxFiles = Number(opts.maxFiles ?? 8);
+    const maxChars = Number(opts.maxChars ?? 60000);
+    await (0, generate_1.generateDocs)({
+        since: opts.since,
+        output: opts.output,
+        only: only ?? "all",
+        maxFiles: Number.isFinite(maxFiles) ? maxFiles : 8,
+        maxChars: Number.isFinite(maxChars) ? maxChars : 60000,
+    });
+});
 program.parse();

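Note on the new CLI flags: commander delivers option values as strings, so the action handler coerces --max-files and --max-chars with Number() and falls back to the defaults when the result is not finite. A minimal standalone sketch of that normalization, with illustrative values (not part of the package):

// Sketch of the flag normalization in dist/cli.js (illustrative values).
// commander passes option values through as strings, e.g. "--max-files 5" arrives as "5".
const opts = { maxFiles: "5", maxChars: "abc" }; // hypothetical parsed options

const maxFiles = Number(opts.maxFiles ?? 8);     // 5
const maxChars = Number(opts.maxChars ?? 60000); // NaN for a non-numeric value

// Number.isFinite() rejects NaN, so bad input falls back to the defaults.
console.log(Number.isFinite(maxFiles) ? maxFiles : 8);     // 5
console.log(Number.isFinite(maxChars) ? maxChars : 60000); // 60000
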
package/dist/commands/generate.js
CHANGED

@@ -12,34 +12,43 @@ const modules_1 = require("../analyzers/modules");
 const markdownWriter_1 = require("../writers/markdownWriter");
 const gitUtils_1 = require("../git/gitUtils");
 const moduleDetector_1 = require("../scanner/moduleDetector");
+const truncate = (s, maxChars) => s.length > maxChars ? s.slice(0, maxChars) + "\n\n...<truncated>" : s;
 async function generateDocs(options) {
     const root = process.cwd();
     const files = options.since
-        ? (0, gitUtils_1.getChangedFiles)(options.since).map(f => path_1.default.join(root, f))
+        ? (0, gitUtils_1.getChangedFiles)(options.since).map((f) => path_1.default.join(root, f))
         : await (0, fileScanner_1.scanProject)(root);
+    const only = options.only ?? "all";
+    const maxFiles = Math.max(1, options.maxFiles ?? 8);
+    const maxChars = Math.max(5000, options.maxChars ?? 60000);
     // ---------- Architecture ----------
-
-        .map(f => path_1.default.relative(root, f))
-
-
-
-
-        .join("\n\n");
-    const architecture = await (0, architecture_1.generateArchitectureDoc)(tree, architectureSource);
-    (0, markdownWriter_1.writeDoc)(options.output, "architecture.md", architecture);
-    // ---------- Modules ----------
-    const modules = (0, moduleDetector_1.detectModules)(files, root);
-    for (const [moduleName, moduleFiles] of Object.entries(modules)) {
-        const fileList = moduleFiles
-            .map(f => path_1.default.relative(root, f))
-            .join("\n");
-        const source = moduleFiles
-            .slice(0, 20)
-            .map(f => `FILE: ${f}\n${(0, contentReader_1.readFile)(f)}`)
+    if (only === "architecture" || only === "all") {
+        const tree = files.map((f) => path_1.default.relative(root, f)).join("\n");
+        // IMPORTANT: limit payload by file count + maxChars
+        const architectureSourceRaw = files
+            .slice(0, maxFiles)
+            .map((f) => `FILE: ${f}\n${(0, contentReader_1.readFile)(f)}`)
             .join("\n\n");
-        const
-        (0,
-
+        const architectureSource = truncate(architectureSourceRaw, maxChars);
+        const architecture = await (0, architecture_1.generateArchitectureDoc)(tree, architectureSource);
+        (0, markdownWriter_1.writeDoc)(options.output, "architecture.md", architecture);
+        console.log("📄 Architecture doc generated");
+    }
+    // ---------- Modules ----------
+    if (only === "modules" || only === "all") {
+        const modules = (0, moduleDetector_1.detectModules)(files, root);
+        for (const [moduleName, moduleFiles] of Object.entries(modules)) {
+            const fileList = moduleFiles.map((f) => path_1.default.relative(root, f)).join("\n");
+            // IMPORTANT: limit payload by file count + maxChars
+            const sourceRaw = moduleFiles
+                .slice(0, maxFiles)
+                .map((f) => `FILE: ${f}\n${(0, contentReader_1.readFile)(f)}`)
+                .join("\n\n");
+            const source = truncate(sourceRaw, maxChars);
+            const doc = await (0, modules_1.generateModuleDocs)(moduleName, fileList, source);
+            (0, markdownWriter_1.writeDoc)(path_1.default.join(options.output, "modules"), `${moduleName}.md`, doc);
+            console.log(`📄 Module doc generated: ${moduleName}`);
+        }
     }
     console.log("✅ Docs generation completed");
 }

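Note on the payload limiting in generateDocs: for both the architecture and module passes, only the first maxFiles files are concatenated, and the combined string is then capped at maxChars by the new truncate helper. A minimal standalone sketch of that behavior (the readFile stub and sample values are illustrative, not the package's real content reader):

// Standalone sketch of the payload limiting added in dist/commands/generate.js.
const truncate = (s, maxChars) =>
    s.length > maxChars ? s.slice(0, maxChars) + "\n\n...<truncated>" : s;

const readFile = (f) => "x".repeat(50);  // stand-in for the real content reader
const files = ["a.ts", "b.ts", "c.ts"];  // hypothetical project files
const maxFiles = 2;
const maxChars = 80;

// Only the first maxFiles files are included, then the result is capped at maxChars.
const raw = files
    .slice(0, maxFiles)
    .map((f) => `FILE: ${f}\n${readFile(f)}`)
    .join("\n\n");

console.log(truncate(raw, maxChars)); // ends with "...<truncated>" when over the cap
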
package/dist/llm/openaiClient.js
CHANGED

@@ -6,14 +6,60 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.openai = void 0;
 exports.askLLM = askLLM;
 const openai_1 = __importDefault(require("openai"));
-const
-
-
-
-
-
-
-
-
+const apiKey = process.env.OPENAI_API_KEY;
+if (!apiKey) {
+    throw new Error("Missing OPENAI_API_KEY.\n" +
+        "Set it before running:\n" +
+        ' export OPENAI_API_KEY="sk-xxxx"\n');
+}
+exports.openai = new openai_1.default({ apiKey });
+const sleep = (ms) => new Promise((r) => setTimeout(r, ms));
+/**
+ * askLLM with:
+ * - retry on 429 using retry-after headers
+ * - fail gracefully if retry-after is extremely long (token budget exhausted)
+ */
+async function askLLM(prompt, maxRetries = 5) {
+    const model = process.env.MODEL || "gpt-4.1-mini";
+    let attempt = 0;
+    while (true) {
+        try {
+            const res = await exports.openai.chat.completions.create({
+                model,
+                messages: [{ role: "user", content: prompt }],
+            });
+            return res.choices[0].message.content ?? "";
+        }
+        catch (err) {
+            attempt++;
+            const status = err?.status;
+            const headers = err?.headers || {};
+            const retryAfterMs = Number(headers["retry-after-ms"]) ||
+                (Number(headers["retry-after"]) || 0) * 1000;
+            if (status === 429) {
+                // If retry-after is huge => org quota/token budget exhausted for a long window.
+                if (retryAfterMs && retryAfterMs > 120000) {
+                    throw new Error([
+                        "Rate limited (429) due to token/request limits.",
+                        `Retry-After is very long: ~${Math.ceil(retryAfterMs / 1000)}s.`,
+                        "",
+                        "Fix suggestions:",
+                        "- Reduce input size:",
+                        " ai-doc-agent generate --only architecture --max-files 5 --max-chars 30000",
+                        "- Or run modules separately:",
+                        " ai-doc-agent generate --only modules --max-files 4 --max-chars 25000",
+                        "- Or increase your OpenAI limits / add billing on your OpenAI account.",
+                    ].join("\n"));
+                }
+                if (attempt <= maxRetries) {
+                    const wait = retryAfterMs || 20000;
+                    console.warn(`⚠️ Rate limited (429). Retrying in ${Math.ceil(wait / 1000)}s... (${attempt}/${maxRetries})`);
+                    await sleep(wait);
+                    continue;
+                }
+            }
+            // Non-429 or exceeded retries
+            throw err;
+        }
+    }
 }

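Note on the 429 handling in askLLM: the retry delay is read from the error's response headers, preferring retry-after-ms and falling back to retry-after (seconds); delays longer than two minutes are treated as an exhausted token/request budget and surfaced as an error with the fix suggestions shown above, otherwise the client waits and retries up to maxRetries times. A minimal sketch of that decision logic (header values are illustrative; the err.status / err.headers shape is what the diff assumes from the OpenAI SDK's error objects):

// Sketch of the retry decision in dist/llm/openaiClient.js (illustrative values).
const headers = { "retry-after": "30" }; // hypothetical headers from a 429 error

const retryAfterMs =
    Number(headers["retry-after-ms"]) ||          // preferred, in milliseconds (NaN here, so skipped)
    (Number(headers["retry-after"]) || 0) * 1000; // fallback, in seconds -> 30000

if (retryAfterMs && retryAfterMs > 120000) {
    // > 2 minutes: treated as a long quota window; fail fast with fix suggestions
    console.log("give up and report the rate limit");
} else {
    // otherwise wait the advised time (or 20s when no header is present), then retry
    console.log(`retry in ${Math.ceil((retryAfterMs || 20000) / 1000)}s`);
}
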
package/package.json
CHANGED

@@ -1,13 +1,17 @@
 {
   "name": "@khiem_enhance/ai-doc-agent",
-  "version": "0.1.
+  "version": "0.1.3",
   "description": "AI-powered documentation generator from source code",
   "license": "MIT",
   "bin": {
     "ai-doc-agent": "dist/cli.js"
   },
   "main": "dist/cli.js",
-  "files": [
+  "files": [
+    "dist",
+    "README.md",
+    "LICENSE"
+  ],
   "scripts": {
     "build": "tsc",
     "prepublishOnly": "npm run build"