agentaudit 3.13.7 → 3.13.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2):
  1. package/cli.mjs +27 -3
  2. package/package.json +1 -1
package/cli.mjs CHANGED
@@ -2875,6 +2875,30 @@ async function safeJsonParse(res, llmConfig) {
2875
2875
  }
2876
2876
  }
2877
2877
 
2878
+ function getMaxOutputTokens(model) {
2879
+ // Known max_completion_tokens from provider docs / OpenRouter API
2880
+ const limits = {
2881
+ // Anthropic
2882
+ 'claude-haiku-4-5': 8192, 'claude-3-haiku': 4096, 'claude-3-5-haiku': 8192,
2883
+ 'claude-sonnet-4-6': 64000, 'claude-sonnet-4-5': 16384, 'claude-3-5-sonnet': 8192,
2884
+ 'claude-opus-4-6': 32768, 'claude-opus-4': 32768,
2885
+ // Google Gemini (all current models support 65536)
2886
+ 'gemini-3': 65536, 'gemini-2.5': 65536, 'gemini-2.0': 65536,
2887
+ // Qwen
2888
+ 'qwen3.5': 65536, 'qwen3': 32768, 'qwen2.5': 32768,
2889
+ // xAI
2890
+ 'grok-4': 32768, 'grok-3': 16384,
2891
+ // OpenAI
2892
+ 'gpt-4.1': 32768, 'gpt-4o': 16384, 'gpt-4-turbo': 4096,
2893
+ 'o3': 100000, 'o4-mini': 100000,
2894
+ };
2895
+ const m = (model || '').toLowerCase();
2896
+ for (const [key, val] of Object.entries(limits)) {
2897
+ if (m.includes(key)) return val;
2898
+ }
2899
+ return 16384; // conservative fallback for unknown models
2900
+ }
2901
+
2878
2902
  async function callLlm(llmConfig, systemPrompt, userMessage) {
2879
2903
  const apiKey = process.env[llmConfig.key];
2880
2904
  if (!apiKey) return { error: `Missing API key: ${llmConfig.key}` };
@@ -2896,7 +2920,7 @@ async function callLlm(llmConfig, systemPrompt, userMessage) {
2896
2920
  const res = await fetch(llmConfig.url, {
2897
2921
  method: 'POST',
2898
2922
  headers: { 'x-api-key': apiKey, 'anthropic-version': '2023-06-01', 'content-type': 'application/json' },
2899
- body: JSON.stringify({ model: llmConfig.model, max_tokens: 16384, system: systemPrompt, messages: [{ role: 'user', content: userMessage }] }),
2923
+ body: JSON.stringify({ model: llmConfig.model, max_tokens: getMaxOutputTokens(llmConfig.model), system: systemPrompt, messages: [{ role: 'user', content: userMessage }] }),
2900
2924
  signal: AbortSignal.timeout(180_000),
2901
2925
  });
2902
2926
  data = await safeJsonParse(res, llmConfig);
@@ -2928,7 +2952,7 @@ async function callLlm(llmConfig, systemPrompt, userMessage) {
2928
2952
  body: JSON.stringify({
2929
2953
  systemInstruction: { parts: [{ text: systemPrompt }] },
2930
2954
  contents: [{ role: 'user', parts: [{ text: userMessage }] }],
2931
- generationConfig: { maxOutputTokens: 65536, responseMimeType: 'application/json', thinkingConfig: { thinkingBudget: 8192 } },
2955
+ generationConfig: { maxOutputTokens: getMaxOutputTokens(llmConfig.model), responseMimeType: 'application/json', thinkingConfig: { thinkingBudget: 8192 } },
2932
2956
  }),
2933
2957
  signal: AbortSignal.timeout(180_000),
2934
2958
  });
@@ -2957,7 +2981,7 @@ async function callLlm(llmConfig, systemPrompt, userMessage) {
2957
2981
  const res = await fetch(llmConfig.url, {
2958
2982
  method: 'POST',
2959
2983
  headers,
2960
- body: JSON.stringify({ model: llmConfig.model, max_tokens: 16384, messages: [{ role: 'system', content: systemPrompt }, { role: 'user', content: userMessage }] }),
2984
+ body: JSON.stringify({ model: llmConfig.model, max_tokens: getMaxOutputTokens(llmConfig.model), messages: [{ role: 'system', content: systemPrompt }, { role: 'user', content: userMessage }] }),
2961
2985
  signal: AbortSignal.timeout(180_000),
2962
2986
  });
2963
2987
  data = await safeJsonParse(res, llmConfig);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "agentaudit",
3
- "version": "3.13.7",
3
+ "version": "3.13.9",
4
4
  "description": "Security scanner for AI agent packages — CLI + MCP server",
5
5
  "type": "module",
6
6
  "bin": {