agentaudit 3.9.34 → 3.9.35
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/cli.mjs +45 -7
- package/package.json +1 -1
package/cli.mjs
CHANGED
|
@@ -1444,11 +1444,34 @@ async function auditRepo(url) {
|
|
|
1444
1444
|
process.stdout.write(` ${c.dim}[3/4]${c.reset} Preparing audit payload...`);
|
|
1445
1445
|
const auditPrompt = loadAuditPrompt();
|
|
1446
1446
|
|
|
1447
|
+
// Build code block with smart truncation to fit context windows.
|
|
1448
|
+
// Reserve ~16k tokens for system prompt + output → budget ~48k tokens for code (~192k chars).
|
|
1449
|
+
// Smaller models may have 32-65k context; we aim for safe default.
|
|
1450
|
+
const MAX_CODE_CHARS = 180_000; // ~45k tokens
|
|
1447
1451
|
let codeBlock = '';
|
|
1452
|
+
let totalChars = 0;
|
|
1453
|
+
let truncatedFiles = 0;
|
|
1448
1454
|
for (const file of files) {
|
|
1449
|
-
|
|
1455
|
+
const entry = `\n### FILE: ${file.path}\n\`\`\`\n${file.content}\n\`\`\`\n`;
|
|
1456
|
+
if (totalChars + entry.length > MAX_CODE_CHARS) {
|
|
1457
|
+
// Try to fit a truncated version of this file
|
|
1458
|
+
const remaining = MAX_CODE_CHARS - totalChars;
|
|
1459
|
+
if (remaining > 200) {
|
|
1460
|
+
const truncContent = file.content.substring(0, remaining - 100);
|
|
1461
|
+
codeBlock += `\n### FILE: ${file.path}\n\`\`\`\n${truncContent}\n[... truncated ...]\n\`\`\`\n`;
|
|
1462
|
+
}
|
|
1463
|
+
truncatedFiles = files.length - codeBlock.split('### FILE:').length + 1;
|
|
1464
|
+
break;
|
|
1465
|
+
}
|
|
1466
|
+
codeBlock += entry;
|
|
1467
|
+
totalChars += entry.length;
|
|
1468
|
+
}
|
|
1469
|
+
if (truncatedFiles > 0) {
|
|
1470
|
+
codeBlock += `\n[⚠ ${truncatedFiles} file(s) omitted due to context window limits]\n`;
|
|
1471
|
+
console.log(` ${c.green}done${c.reset} ${c.yellow}(${truncatedFiles} files truncated to fit context window)${c.reset}`);
|
|
1472
|
+
} else {
|
|
1473
|
+
console.log(` ${c.green}done${c.reset}`);
|
|
1450
1474
|
}
|
|
1451
|
-
console.log(` ${c.green}done${c.reset}`);
|
|
1452
1475
|
|
|
1453
1476
|
// Step 4: LLM Analysis
|
|
1454
1477
|
// Check for API keys to determine which LLM to use
|
|
@@ -1639,7 +1662,12 @@ async function auditRepo(url) {
|
|
|
1639
1662
|
const data = await res.json();
|
|
1640
1663
|
if (data.error) {
|
|
1641
1664
|
console.log(` ${c.red}failed${c.reset}`);
|
|
1642
|
-
|
|
1665
|
+
const errMsg = data.error.message || JSON.stringify(data.error);
|
|
1666
|
+
console.log(` ${c.red}API error: ${errMsg}${c.reset}`);
|
|
1667
|
+
if (/context.length|maximum.*tokens|too.many.tokens/i.test(errMsg)) {
|
|
1668
|
+
console.log(` ${c.dim}This model's context window is too small for this repository.${c.reset}`);
|
|
1669
|
+
console.log(` ${c.dim}Try a model with a larger context: --model anthropic/claude-sonnet-4 (200k) or --model openai/gpt-4o (128k)${c.reset}`);
|
|
1670
|
+
}
|
|
1643
1671
|
try { fs.rmSync(tmpDir, { recursive: true, force: true }); } catch {}
|
|
1644
1672
|
return null;
|
|
1645
1673
|
}
|
|
@@ -1667,11 +1695,21 @@ async function auditRepo(url) {
|
|
|
1667
1695
|
try { fs.rmSync(tmpDir, { recursive: true, force: true }); } catch {}
|
|
1668
1696
|
|
|
1669
1697
|
if (!report) {
|
|
1670
|
-
|
|
1671
|
-
|
|
1672
|
-
|
|
1698
|
+
const rawLen = typeof _lastLlmText === 'string' ? _lastLlmText.length : 0;
|
|
1699
|
+
if (rawLen === 0) {
|
|
1700
|
+
console.log(` ${c.red}✖ Model returned an empty response${c.reset}`);
|
|
1701
|
+
console.log(` ${c.dim}This model may not support structured JSON output or the prompt was too large.${c.reset}`);
|
|
1702
|
+
console.log(` ${c.dim}Try a different model: --model anthropic/claude-sonnet-4 or --model openai/gpt-4o${c.reset}`);
|
|
1703
|
+
} else {
|
|
1704
|
+
console.log(` ${c.red}✖ Could not parse LLM response as JSON${c.reset}`);
|
|
1705
|
+
console.log(` ${c.dim}The model returned ${rawLen} chars but not valid JSON. Try a stronger model.${c.reset}`);
|
|
1706
|
+
if (!process.argv.includes('--debug')) {
|
|
1707
|
+
console.log(` ${c.dim}Hint: run with --debug to see the raw LLM response${c.reset}`);
|
|
1708
|
+
}
|
|
1709
|
+
}
|
|
1710
|
+
if (process.argv.includes('--debug') && rawLen > 0) {
|
|
1673
1711
|
console.log(` ${c.dim}--- Raw LLM response (first 2000 chars) ---${c.reset}`);
|
|
1674
|
-
console.log(
|
|
1712
|
+
console.log(_lastLlmText.slice(0, 2000));
|
|
1675
1713
|
console.log(` ${c.dim}--- end ---${c.reset}`);
|
|
1676
1714
|
}
|
|
1677
1715
|
return null;
|