agentaudit 3.13.10 → 3.13.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2):
  1. package/cli.mjs +30 -7
  2. package/package.json +1 -1
package/cli.mjs CHANGED
@@ -2913,6 +2913,11 @@ async function callLlm(llmConfig, systemPrompt, userMessage) {
2913
2913
  if (!apiKey) return { error: `Missing API key: ${llmConfig.key}` };
2914
2914
  const start = Date.now();
2915
2915
 
2916
+ // --timeout flag (seconds), default 180s (3 min)
2917
+ const timeoutArgIdx = process.argv.indexOf('--timeout');
2918
+ const timeoutSec = timeoutArgIdx !== -1 ? Math.max(30, Math.min(600, parseInt(process.argv[timeoutArgIdx + 1], 10) || 180)) : 180;
2919
+ const timeoutMs = timeoutSec * 1000;
2920
+
2916
2921
  // Context window warning
2917
2922
  const ctxCheck = checkContextLimit(llmConfig.model, systemPrompt, userMessage);
2918
2923
  if (ctxCheck) {
@@ -2922,6 +2927,17 @@ async function callLlm(llmConfig, systemPrompt, userMessage) {
2922
2927
  }
2923
2928
  }
2924
2929
 
2930
+ // Live timer — updates every second while waiting for LLM
2931
+ let liveTimer = null;
2932
+ if (process.stdout.isTTY && !quietMode) {
2933
+ liveTimer = setInterval(() => {
2934
+ const secs = Math.round((Date.now() - start) / 1000);
2935
+ const remaining = timeoutSec - secs;
2936
+ const timerColor = remaining <= 30 ? c.yellow : c.dim;
2937
+ process.stdout.write(`\r ${stepProgress(4, 4)} Running LLM analysis ${c.dim}(${llmConfig.name})${c.reset} ${timerColor}${secs}s/${timeoutSec}s${c.reset} `);
2938
+ }, 1000);
2939
+ }
2940
+
2925
2941
  let _text = '';
2926
2942
  try {
2927
2943
  let data;
@@ -2930,7 +2946,7 @@ async function callLlm(llmConfig, systemPrompt, userMessage) {
2930
2946
  method: 'POST',
2931
2947
  headers: { 'x-api-key': apiKey, 'anthropic-version': '2023-06-01', 'content-type': 'application/json' },
2932
2948
  body: JSON.stringify({ model: llmConfig.model, max_tokens: getMaxOutputTokens(llmConfig.model), system: systemPrompt, messages: [{ role: 'user', content: userMessage }] }),
2933
- signal: AbortSignal.timeout(180_000),
2949
+ signal: AbortSignal.timeout(timeoutMs),
2934
2950
  });
2935
2951
  data = await safeJsonParse(res, llmConfig);
2936
2952
  if (data.error) {
@@ -2963,7 +2979,7 @@ async function callLlm(llmConfig, systemPrompt, userMessage) {
2963
2979
  contents: [{ role: 'user', parts: [{ text: userMessage }] }],
2964
2980
  generationConfig: { maxOutputTokens: getMaxOutputTokens(llmConfig.model), responseMimeType: 'application/json', thinkingConfig: { thinkingBudget: 8192 } },
2965
2981
  }),
2966
- signal: AbortSignal.timeout(180_000),
2982
+ signal: AbortSignal.timeout(timeoutMs),
2967
2983
  });
2968
2984
  data = await safeJsonParse(res, llmConfig);
2969
2985
  if (data.error) {
@@ -2991,7 +3007,7 @@ async function callLlm(llmConfig, systemPrompt, userMessage) {
2991
3007
  method: 'POST',
2992
3008
  headers,
2993
3009
  body: JSON.stringify({ model: llmConfig.model, max_tokens: getMaxOutputTokens(llmConfig.model), messages: [{ role: 'system', content: systemPrompt }, { role: 'user', content: userMessage }] }),
2994
- signal: AbortSignal.timeout(180_000),
3010
+ signal: AbortSignal.timeout(timeoutMs),
2995
3011
  });
2996
3012
  data = await safeJsonParse(res, llmConfig);
2997
3013
  if (data.error) {
@@ -3017,9 +3033,11 @@ async function callLlm(llmConfig, systemPrompt, userMessage) {
3017
3033
  }
3018
3034
  } catch (err) {
3019
3035
  const dur = Date.now() - start;
3020
- if (err.name === 'TimeoutError' || err.message?.includes('timeout')) return { error: 'Request timed out (180s)', hint: 'Try again or use a faster model', duration: dur };
3036
+ if (err.name === 'TimeoutError' || err.message?.includes('timeout')) return { error: `Request timed out (${timeoutSec}s)`, hint: `Increase timeout: --timeout ${timeoutSec * 2}`, duration: dur };
3021
3037
  if (err.code === 'ENOTFOUND' || err.code === 'ECONNREFUSED' || err.message?.includes('fetch failed')) return { error: `Network error: could not reach ${llmConfig.provider}`, hint: 'Check your internet connection', duration: dur };
3022
3038
  return { error: err.message, duration: dur };
3039
+ } finally {
3040
+ if (liveTimer) clearInterval(liveTimer);
3023
3041
  }
3024
3042
  }
3025
3043
 
@@ -3827,14 +3845,16 @@ async function auditRepo(url) {
3827
3845
 
3828
3846
  const llmResult = await callLlm(activeLlm, systemPrompt, userMessage);
3829
3847
 
3848
+ // Clear live timer line and print final status
3849
+ if (process.stdout.isTTY) process.stdout.write('\r\x1b[K');
3830
3850
  if (llmResult.error) {
3831
- console.log(` ${c.red}failed${c.reset}`);
3851
+ console.log(` ${stepProgress(4, 4)} Running LLM analysis ${c.dim}(${modelLabel})${c.reset} ${c.red}failed${c.reset} ${c.dim}(${elapsed(start)})${c.reset}`);
3832
3852
  console.log(` ${c.red}${llmResult.error}${c.reset}`);
3833
3853
  if (llmResult.hint) console.log(` ${c.dim}${llmResult.hint}${c.reset}`);
3834
3854
  return null;
3835
3855
  }
3836
3856
 
3837
- console.log(` ${c.green}done${c.reset} ${c.dim}(${elapsed(start)})${c.reset}`);
3857
+ console.log(` ${stepProgress(4, 4)} Running LLM analysis ${c.dim}(${modelLabel})${c.reset} ${c.green}done${c.reset} ${c.dim}(${elapsed(start)})${c.reset}`);
3838
3858
 
3839
3859
  if (llmResult.truncated) {
3840
3860
  console.log();
@@ -5243,11 +5263,13 @@ async function main() {
5243
5263
  // Strip global flags from args (including --model <value>, --format <value>)
5244
5264
  const globalFlags = new Set(['--json', '--quiet', '-q', '--no-color', '--no-upload', '--remote']);
5245
5265
  let args = rawArgs.filter(a => !globalFlags.has(a));
5246
- // Remove --model <value> and --models <value> pairs
5266
+ // Remove --model <value>, --models <value>, --timeout <value> pairs
5247
5267
  const modelIdx = args.indexOf('--model');
5248
5268
  if (modelIdx !== -1) args.splice(modelIdx, 2);
5249
5269
  const modelsIdx = args.indexOf('--models');
5250
5270
  if (modelsIdx !== -1) args.splice(modelsIdx, 2);
5271
+ const timeoutIdx = args.indexOf('--timeout');
5272
+ if (timeoutIdx !== -1) args.splice(timeoutIdx, 2);
5251
5273
  // Remove --format <value> pair
5252
5274
  const formatIdx = args.indexOf('--format');
5253
5275
  const formatFlag = formatIdx !== -1 ? args.splice(formatIdx, 2)[1] : null;
@@ -5323,6 +5345,7 @@ async function main() {
5323
5345
  ` <name> — specific model as verifier (e.g. sonnet)`,
5324
5346
  ` --no-verify Disable verification (even if default)`,
5325
5347
  ` --remote Use agentaudit.dev server (no LLM key needed, 3/day free)`,
5348
+ ` --timeout <sec> LLM request timeout in seconds (default: 180, max: 600)`,
5326
5349
  ` --model <name> Override LLM model for this run`,
5327
5350
  ` --models <a,b,c> Multi-model audit (parallel calls, consensus comparison)`,
5328
5351
  ` --no-upload Skip uploading report to registry`,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "agentaudit",
3
- "version": "3.13.10",
3
+ "version": "3.13.11",
4
4
  "description": "Security scanner for AI agent packages — CLI + MCP server",
5
5
  "type": "module",
6
6
  "bin": {