@khiem_enhance/ai-doc-agent 0.1.1 → 0.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli.js CHANGED
@@ -1,6 +1,7 @@
  #!/usr/bin/env node
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
+ require("dotenv/config");
  const commander_1 = require("commander");
  const generate_1 = require("./commands/generate");
  const program = new commander_1.Command();
@@ -10,8 +11,22 @@ program
  .version("0.1.0");
  program
  .command("generate")
- .option("--only <part>", "architecture|modules", "architecture")
- .option("--output <dir>", "Docs output directory", "docs")
  .option("--since <commit>", "Only analyze changes since commit")
- .action(generate_1.generateDocs);
+ .option("--output <dir>", "Docs output directory", "docs")
+ .option("--only <part>", "architecture|modules|all", "all")
+ .option("--max-files <n>", "Max files included per LLM request", "8")
+ .option("--max-chars <n>", "Max characters included per LLM request", "60000")
+ .action(async (opts) => {
+ // normalize options
+ const only = String(opts.only ?? "all");
+ const maxFiles = Number(opts.maxFiles ?? 8);
+ const maxChars = Number(opts.maxChars ?? 60000);
+ await (0, generate_1.generateDocs)({
+ since: opts.since,
+ output: opts.output,
+ only: only ?? "all",
+ maxFiles: Number.isFinite(maxFiles) ? maxFiles : 8,
+ maxChars: Number.isFinite(maxChars) ? maxChars : 60000,
+ });
+ });
  program.parse();
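
As a usage sketch, the flags added above combine along these lines (example invocations only; they assume OPENAI_API_KEY is exported in the shell or supplied via a .env file, which the new require("dotenv/config") line now loads):

  ai-doc-agent generate --only architecture --max-files 5 --max-chars 30000
  ai-doc-agent generate --only modules --output docs --max-files 4 --max-chars 25000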
@@ -12,35 +12,43 @@ const modules_1 = require("../analyzers/modules");
  const markdownWriter_1 = require("../writers/markdownWriter");
  const gitUtils_1 = require("../git/gitUtils");
  const moduleDetector_1 = require("../scanner/moduleDetector");
- const sleep = (ms) => new Promise((r) => setTimeout(r, ms));
+ const truncate = (s, maxChars) => s.length > maxChars ? s.slice(0, maxChars) + "\n\n...<truncated>" : s;
  async function generateDocs(options) {
  const root = process.cwd();
  const files = options.since
  ? (0, gitUtils_1.getChangedFiles)(options.since).map((f) => path_1.default.join(root, f))
  : await (0, fileScanner_1.scanProject)(root);
+ const only = options.only ?? "all";
+ const maxFiles = Math.max(1, options.maxFiles ?? 8);
+ const maxChars = Math.max(5000, options.maxChars ?? 60000);
  // ---------- Architecture ----------
- const tree = files.map((f) => path_1.default.relative(root, f)).join("\n");
- const architectureSource = files
- .slice(0, 25)
- .map((f) => `FILE: ${f}\n${(0, contentReader_1.readFile)(f)}`)
- .join("\n\n");
- const architecture = await (0, architecture_1.generateArchitectureDoc)(tree, architectureSource);
- (0, markdownWriter_1.writeDoc)(options.output, "architecture.md", architecture);
- // ✅ Throttle so we don't hit the RPM limit right after the architecture request
- await sleep(22000);
- // ---------- Modules ----------
- const modules = (0, moduleDetector_1.detectModules)(files, root);
- for (const [moduleName, moduleFiles] of Object.entries(modules)) {
- const fileList = moduleFiles.map((f) => path_1.default.relative(root, f)).join("\n");
- const source = moduleFiles
- .slice(0, 20)
+ if (only === "architecture" || only === "all") {
+ const tree = files.map((f) => path_1.default.relative(root, f)).join("\n");
+ // IMPORTANT: limit payload by file count + maxChars
+ const architectureSourceRaw = files
+ .slice(0, maxFiles)
  .map((f) => `FILE: ${f}\n${(0, contentReader_1.readFile)(f)}`)
  .join("\n\n");
- const doc = await (0, modules_1.generateModuleDocs)(moduleName, fileList, source);
- (0, markdownWriter_1.writeDoc)(path_1.default.join(options.output, "modules"), `${moduleName}.md`, doc);
- console.log(`📄 Module doc generated: ${moduleName}`);
- // Throttle between modules to stay under 3 requests/min
- await sleep(22000);
+ const architectureSource = truncate(architectureSourceRaw, maxChars);
+ const architecture = await (0, architecture_1.generateArchitectureDoc)(tree, architectureSource);
+ (0, markdownWriter_1.writeDoc)(options.output, "architecture.md", architecture);
+ console.log("📄 Architecture doc generated");
+ }
+ // ---------- Modules ----------
+ if (only === "modules" || only === "all") {
+ const modules = (0, moduleDetector_1.detectModules)(files, root);
+ for (const [moduleName, moduleFiles] of Object.entries(modules)) {
+ const fileList = moduleFiles.map((f) => path_1.default.relative(root, f)).join("\n");
+ // IMPORTANT: limit payload by file count + maxChars
+ const sourceRaw = moduleFiles
+ .slice(0, maxFiles)
+ .map((f) => `FILE: ${f}\n${(0, contentReader_1.readFile)(f)}`)
+ .join("\n\n");
+ const source = truncate(sourceRaw, maxChars);
+ const doc = await (0, modules_1.generateModuleDocs)(moduleName, fileList, source);
+ (0, markdownWriter_1.writeDoc)(path_1.default.join(options.output, "modules"), `${moduleName}.md`, doc);
+ console.log(`📄 Module doc generated: ${moduleName}`);
+ }
  }
  console.log("✅ Docs generation completed");
  }
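
A minimal sketch of what the new payload limiting does, using the truncate helper introduced above (the sample string and lengths are illustrative only):

const truncate = (s, maxChars) => s.length > maxChars ? s.slice(0, maxChars) + "\n\n...<truncated>" : s;

// Concatenated "FILE: <path>\n<content>" blocks longer than --max-chars are cut
// down to the first maxChars characters plus a "...<truncated>" marker.
const oversized = "x".repeat(70000);              // pretend source payload
const bounded = truncate(oversized, 60000);       // default --max-chars
console.log(bounded.endsWith("...<truncated>"));  // true
console.log(truncate("short file", 60000));       // returned unchanged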
@@ -6,14 +6,60 @@ Object.defineProperty(exports, "__esModule", { value: true });
  exports.openai = void 0;
  exports.askLLM = askLLM;
  const openai_1 = __importDefault(require("openai"));
- const env_1 = require("../config/env");
- exports.openai = new openai_1.default({
- apiKey: env_1.env.openaiKey
- });
- async function askLLM(prompt) {
- const res = await exports.openai.chat.completions.create({
- model: env_1.env.model,
- messages: [{ role: "user", content: prompt }]
- });
- return res.choices[0].message.content;
+ const apiKey = process.env.OPENAI_API_KEY;
+ if (!apiKey) {
+ throw new Error("Missing OPENAI_API_KEY.\n" +
+ "Set it before running:\n" +
+ ' export OPENAI_API_KEY="sk-xxxx"\n');
+ }
+ exports.openai = new openai_1.default({ apiKey });
+ const sleep = (ms) => new Promise((r) => setTimeout(r, ms));
+ /**
+ * askLLM with:
+ * - retry on 429 using retry-after headers
+ * - fail gracefully if retry-after is extremely long (token budget exhausted)
+ */
+ async function askLLM(prompt, maxRetries = 5) {
+ const model = process.env.MODEL || "gpt-4.1-mini";
+ let attempt = 0;
+ while (true) {
+ try {
+ const res = await exports.openai.chat.completions.create({
+ model,
+ messages: [{ role: "user", content: prompt }],
+ });
+ return res.choices[0].message.content ?? "";
+ }
+ catch (err) {
+ attempt++;
+ const status = err?.status;
+ const headers = err?.headers || {};
+ const retryAfterMs = Number(headers["retry-after-ms"]) ||
+ (Number(headers["retry-after"]) || 0) * 1000;
+ if (status === 429) {
+ // If retry-after is huge => org quota/token budget exhausted for a long window.
+ if (retryAfterMs && retryAfterMs > 120000) {
+ throw new Error([
+ "Rate limited (429) due to token/request limits.",
+ `Retry-After is very long: ~${Math.ceil(retryAfterMs / 1000)}s.`,
+ "",
+ "Fix suggestions:",
+ "- Reduce input size:",
+ " ai-doc-agent generate --only architecture --max-files 5 --max-chars 30000",
+ "- Or run modules separately:",
+ " ai-doc-agent generate --only modules --max-files 4 --max-chars 25000",
+ "- Or increase your OpenAI limits / add billing on your OpenAI account.",
+ ].join("\n"));
+ }
+ if (attempt <= maxRetries) {
+ const wait = retryAfterMs || 20000;
+ console.warn(`⚠️ Rate limited (429). Retrying in ${Math.ceil(wait / 1000)}s... (${attempt}/${maxRetries})`);
+ await sleep(wait);
+ continue;
+ }
+ }
+ // Non-429 or exceeded retries
+ throw err;
+ }
+ }
  }
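
To make the retry math above concrete, here is a small sketch of how the wait time is derived from a 429 response (header values are made-up examples; the field names mirror the code above):

// "retry-after-ms" takes precedence; otherwise "retry-after" (seconds) is converted to ms.
const headers = { "retry-after": "30" };             // hypothetical 429 response headers
const retryAfterMs = Number(headers["retry-after-ms"]) ||
    (Number(headers["retry-after"]) || 0) * 1000;    // -> 30000
const wait = retryAfterMs || 20000;                  // -> 30000; 20000 if neither header is present
// Waits above 120000 ms are not retried: askLLM throws the "Fix suggestions" error instead.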
package/package.json CHANGED
@@ -1,13 +1,17 @@
  {
  "name": "@khiem_enhance/ai-doc-agent",
- "version": "0.1.1",
+ "version": "0.1.3",
  "description": "AI-powered documentation generator from source code",
  "license": "MIT",
  "bin": {
  "ai-doc-agent": "dist/cli.js"
  },
  "main": "dist/cli.js",
- "files": ["dist", "README.md", "LICENSE"],
+ "files": [
+ "dist",
+ "README.md",
+ "LICENSE"
+ ],
  "scripts": {
  "build": "tsc",
  "prepublishOnly": "npm run build"