agentaudit 3.9.18 → 3.9.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +32 -4
  2. package/cli.mjs +60 -32
  3. package/package.json +1 -1
package/README.md CHANGED
@@ -208,6 +208,7 @@ Then ask your agent: *"Check which MCP servers I have installed and audit any un
208
208
  | `agentaudit audit <url>` | Deep LLM-powered 3-pass audit (~30s) | `agentaudit audit https://github.com/owner/repo` |
209
209
  | `agentaudit lookup <name>` | Look up package in trust registry | `agentaudit lookup fastmcp` |
210
210
  | `agentaudit check <name\|url>` | Lookup + auto-audit if not found | `agentaudit check https://github.com/owner/repo` |
211
+ | `agentaudit status` | Check API keys + active LLM provider | `agentaudit status` |
211
212
  | `agentaudit setup` | Register agent + configure API key | `agentaudit setup` |
212
213
 
213
214
  ### Global Flags
@@ -217,6 +218,7 @@ Then ask your agent: *"Check which MCP servers I have installed and audit any un
217
218
  | `--json` | Output machine-readable JSON to stdout |
218
219
  | `--quiet` / `-q` | Suppress banner and decorative output (show findings only) |
219
220
  | `--no-color` | Disable ANSI colors (also respects `NO_COLOR` env var) |
221
+ | `--provider <name>` | Force LLM provider (`anthropic`, `openai`, `openrouter`, `ollama`, `custom`) |
220
222
  | `--help` / `-h` | Show help text |
221
223
  | `-v` / `--version` | Show version |
222
224
 
@@ -433,12 +435,19 @@ export AGENTAUDIT_API_KEY=asf_your_key_here
433
435
  | Variable | Description |
434
436
  |----------|-------------|
435
437
  | `AGENTAUDIT_API_KEY` | API key for registry access |
436
- | `ANTHROPIC_API_KEY` | Anthropic API key for deep audits (Claude) |
438
+ | `ANTHROPIC_API_KEY` | Anthropic API key for deep audits (Claude) -- recommended |
437
439
  | `OPENAI_API_KEY` | OpenAI API key for deep audits (GPT-4o) |
438
440
  | `OPENROUTER_API_KEY` | OpenRouter API key (access 200+ models) |
439
441
  | `OPENROUTER_MODEL` | Model to use via OpenRouter (default: `anthropic/claude-sonnet-4`) |
442
+ | `OLLAMA_MODEL` | Ollama model name for local audits (e.g. `llama3.1`, `qwen2.5-coder`) |
443
+ | `OLLAMA_HOST` | Ollama server URL (default: `http://localhost:11434`) |
444
+ | `LLM_API_URL` | Any OpenAI-compatible API endpoint (e.g. LM Studio, vLLM, Together, Groq) |
445
+ | `LLM_API_KEY` | API key for custom endpoint (optional if no auth needed) |
446
+ | `LLM_MODEL` | Model name for custom endpoint |
440
447
  | `NO_COLOR` | Disable ANSI colors ([no-color.org](https://no-color.org)) |
441
448
 
449
+ > **Provider priority:** Anthropic > OpenAI > OpenRouter > Custom > Ollama. Override with `--provider=ollama` etc.
450
+
442
451
  ---
443
452
 
444
453
  ## 📦 Requirements
@@ -468,7 +477,7 @@ Or use without installing: `npx agentaudit`
468
477
 
469
478
  ### Setting up your LLM key for deep audits
470
479
 
471
- The `audit` command supports **three LLM providers**. Set one of these environment variables:
480
+ The `audit` command supports **any LLM provider**. Set one of these environment variables:
472
481
 
473
482
  ```bash
474
483
  # Linux / macOS
@@ -487,13 +496,32 @@ set OPENAI_API_KEY=sk-...
487
496
  set OPENROUTER_API_KEY=sk-or-...
488
497
  ```
489
498
 
490
- **Provider priority:** Anthropic > OpenAI > OpenRouter. The active provider is shown during the audit.
499
+ **Provider priority:** Anthropic > OpenAI > OpenRouter > Custom > Ollama. Override with `--provider=<name>`.
491
500
 
492
- **OpenRouter model selection:** By default, OpenRouter uses `anthropic/claude-sonnet-4`. Override with:
501
+ **OpenRouter model selection:** By default uses `anthropic/claude-sonnet-4`. Override with:
493
502
  ```bash
494
503
  export OPENROUTER_MODEL=google/gemini-2.5-pro # or any model on openrouter.ai
495
504
  ```
496
505
 
506
+ **Local with Ollama (free, no API key):**
507
+ ```bash
508
+ export OLLAMA_MODEL=llama3.1 # or qwen2.5-coder, deepseek-r1, etc.
509
+ agentaudit audit https://github.com/owner/repo
510
+ ```
511
+ > Note: Local models produce lower quality audits than Claude/GPT-4o. Use for quick checks, not production security audits.
512
+
513
+ **Any OpenAI-compatible API:**
514
+ ```bash
515
+ export LLM_API_URL=http://localhost:1234/v1 # LM Studio, vLLM, etc.
516
+ export LLM_MODEL=my-model
517
+ agentaudit audit https://github.com/owner/repo
518
+ ```
519
+
520
+ **Check your setup:**
521
+ ```bash
522
+ agentaudit status # validates all configured API keys
523
+ ```
524
+
497
525
  **Troubleshooting:** If you see `API error: Incorrect API key`, double-check your key is valid and has credits. Use `--debug` to see the full API response.
498
526
 
499
527
  ### What data is sent externally?
package/cli.mjs CHANGED
@@ -28,22 +28,31 @@ const REGISTRY_URL = 'https://agentaudit.dev';
28
28
 
29
29
  // ── Provider resolution ────
30
30
  function resolveProvider(preferred, keys) {
31
+ const orModel = process.env.OPENROUTER_MODEL || 'anthropic/claude-sonnet-4';
32
+ const ollamaModel = process.env.OLLAMA_MODEL || 'llama3.1';
33
+ const ollamaHost = process.env.OLLAMA_HOST || 'http://localhost:11434';
34
+ const customUrl = process.env.LLM_API_URL;
35
+ const customKey = process.env.LLM_API_KEY;
36
+ const customModel = process.env.LLM_MODEL || 'default';
37
+
31
38
  const providers = {
32
39
  anthropic: keys.anthropicKey ? { id: 'anthropic', label: 'Anthropic (Claude)', key: keys.anthropicKey } : null,
33
40
  openai: keys.openaiKey ? { id: 'openai', label: 'OpenAI (GPT-4o)', key: keys.openaiKey } : null,
34
- openrouter: keys.openrouterKey ? { id: 'openrouter', label: `OpenRouter (${process.env.OPENROUTER_MODEL || 'anthropic/claude-sonnet-4'})`, key: keys.openrouterKey } : null,
41
+ openrouter: keys.openrouterKey ? { id: 'openrouter', label: `OpenRouter (${orModel})`, key: keys.openrouterKey } : null,
42
+ ollama: process.env.OLLAMA_MODEL || process.env.OLLAMA_HOST ? { id: 'ollama', label: `Ollama (${ollamaModel})`, key: null, host: ollamaHost, model: ollamaModel } : null,
43
+ custom: customUrl ? { id: 'custom', label: `Custom (${customModel})`, key: customKey, url: customUrl, model: customModel } : null,
35
44
  };
36
45
  // Aliases
37
- const aliases = { claude: 'anthropic', gpt: 'openai', 'gpt-4o': 'openai', 'gpt4': 'openai', or: 'openrouter' };
46
+ const aliases = { claude: 'anthropic', gpt: 'openai', 'gpt-4o': 'openai', 'gpt4': 'openai', or: 'openrouter', local: 'ollama' };
38
47
 
39
48
  if (preferred) {
40
49
  const resolved = aliases[preferred] || preferred;
41
50
  const p = providers[resolved];
42
- if (!p) return null; // requested provider not available
51
+ if (!p) return null;
43
52
  return p;
44
53
  }
45
- // Auto-detect: Anthropic > OpenAI > OpenRouter
46
- return providers.anthropic || providers.openai || providers.openrouter || null;
54
+ // Auto-detect priority: Anthropic > OpenAI > OpenRouter > Custom > Ollama (local last — usually weaker)
55
+ return providers.anthropic || providers.openai || providers.openrouter || providers.custom || providers.ollama || null;
47
56
  }
48
57
 
49
58
  // ── Global flags (set in main before command routing) ────
@@ -1344,10 +1353,14 @@ async function auditRepo(url) {
1344
1353
  if (!resolvedProvider) {
1345
1354
  // No LLM API key — clear explanation
1346
1355
  console.log();
1347
- console.log(` ${c.yellow}No LLM API key found.${c.reset} The ${c.bold}audit${c.reset} command needs an LLM to analyze code.`);
1356
+ console.log(` ${c.yellow}No LLM provider configured.${c.reset} The ${c.bold}audit${c.reset} command needs an LLM to analyze code.`);
1348
1357
  console.log();
1349
- console.log(` ${c.bold}Option 1: Set an API key${c.reset}`);
1350
- console.log(` Supported keys: ${c.cyan}ANTHROPIC_API_KEY${c.reset}, ${c.cyan}OPENAI_API_KEY${c.reset}, or ${c.cyan}OPENROUTER_API_KEY${c.reset}`);
1358
+ console.log(` ${c.bold}Option 1: Set an API key${c.reset} ${c.dim}(any one of these)${c.reset}`);
1359
+ console.log(` ${c.cyan}ANTHROPIC_API_KEY${c.reset} Anthropic Claude ${c.dim}(recommended)${c.reset}`);
1360
+ console.log(` ${c.cyan}OPENAI_API_KEY${c.reset} OpenAI GPT-4o`);
1361
+ console.log(` ${c.cyan}OPENROUTER_API_KEY${c.reset} OpenRouter ${c.dim}(200+ models)${c.reset}`);
1362
+ console.log(` ${c.cyan}OLLAMA_MODEL${c.reset} Ollama ${c.dim}(local, free, set model name)${c.reset}`);
1363
+ console.log(` ${c.cyan}LLM_API_URL${c.reset} Any OpenAI-compatible API ${c.dim}(+ LLM_API_KEY, LLM_MODEL)${c.reset}`);
1351
1364
  console.log();
1352
1365
  console.log(` ${c.dim}# Linux / macOS:${c.reset}`);
1353
1366
  console.log(` ${c.dim}export ANTHROPIC_API_KEY=sk-ant-...${c.reset}`);
@@ -1451,19 +1464,33 @@ async function auditRepo(url) {
1451
1464
  _lastLlmText = text;
1452
1465
  report = extractJSON(text);
1453
1466
  } else {
1454
- // OpenAI or OpenRouter (both use OpenAI-compatible API)
1455
- const isOpenRouter = resolvedProvider.id === 'openrouter';
1456
- const apiUrl = isOpenRouter ? 'https://openrouter.ai/api/v1/chat/completions' : 'https://api.openai.com/v1/chat/completions';
1457
- const modelName = isOpenRouter ? (process.env.OPENROUTER_MODEL || 'anthropic/claude-sonnet-4') : 'gpt-4o';
1458
- const extraHeaders = isOpenRouter ? { 'HTTP-Referer': 'https://agentaudit.dev', 'X-Title': 'AgentAudit' } : {};
1467
+ // OpenAI, OpenRouter, Ollama, or Custom (all use OpenAI-compatible chat completions API)
1468
+ let apiUrl, modelName, authHeaders;
1469
+ switch (resolvedProvider.id) {
1470
+ case 'openrouter':
1471
+ apiUrl = 'https://openrouter.ai/api/v1/chat/completions';
1472
+ modelName = process.env.OPENROUTER_MODEL || 'anthropic/claude-sonnet-4';
1473
+ authHeaders = { 'Authorization': `Bearer ${resolvedProvider.key}`, 'HTTP-Referer': 'https://agentaudit.dev', 'X-Title': 'AgentAudit' };
1474
+ break;
1475
+ case 'ollama':
1476
+ apiUrl = `${resolvedProvider.host}/v1/chat/completions`;
1477
+ modelName = resolvedProvider.model;
1478
+ authHeaders = {};
1479
+ break;
1480
+ case 'custom':
1481
+ apiUrl = resolvedProvider.url.endsWith('/chat/completions') ? resolvedProvider.url : `${resolvedProvider.url.replace(/\/$/, '')}/chat/completions`;
1482
+ modelName = resolvedProvider.model;
1483
+ authHeaders = resolvedProvider.key ? { 'Authorization': `Bearer ${resolvedProvider.key}` } : {};
1484
+ break;
1485
+ default: // openai
1486
+ apiUrl = 'https://api.openai.com/v1/chat/completions';
1487
+ modelName = 'gpt-4o';
1488
+ authHeaders = { 'Authorization': `Bearer ${resolvedProvider.key}` };
1489
+ }
1459
1490
 
1460
1491
  const res = await fetch(apiUrl, {
1461
1492
  method: 'POST',
1462
- headers: {
1463
- 'Authorization': `Bearer ${resolvedProvider.key}`,
1464
- 'Content-Type': 'application/json',
1465
- ...extraHeaders,
1466
- },
1493
+ headers: { 'Content-Type': 'application/json', ...authHeaders },
1467
1494
  body: JSON.stringify({
1468
1495
  model: modelName,
1469
1496
  max_tokens: 8192,
@@ -1472,7 +1499,7 @@ async function auditRepo(url) {
1472
1499
  { role: 'user', content: userMessage },
1473
1500
  ],
1474
1501
  }),
1475
- signal: AbortSignal.timeout(120_000),
1502
+ signal: AbortSignal.timeout(resolvedProvider.id === 'ollama' ? 300_000 : 120_000), // Ollama: 5min (local can be slow)
1476
1503
  });
1477
1504
  const data = await res.json();
1478
1505
  if (data.error) {
@@ -1706,19 +1733,14 @@ async function main() {
1706
1733
  console.log(` agentaudit audit https://github.com/owner/repo`);
1707
1734
  console.log(` agentaudit lookup fastmcp --json`);
1708
1735
  console.log();
1709
- console.log(` ${c.bold}For deep audits,${c.reset} set an LLM API key (any one):`);
1710
- if (process.platform === 'win32') {
1711
- console.log(` ${c.dim}PowerShell: $env:ANTHROPIC_API_KEY = "sk-ant-..."${c.reset}`);
1712
- console.log(` ${c.dim} $env:OPENAI_API_KEY = "sk-..."${c.reset}`);
1713
- console.log(` ${c.dim} $env:OPENROUTER_API_KEY = "sk-or-..."${c.reset}`);
1714
- console.log(` ${c.dim}CMD: set ANTHROPIC_API_KEY=sk-ant-...${c.reset}`);
1715
- console.log(` ${c.dim} set OPENAI_API_KEY=sk-...${c.reset}`);
1716
- console.log(` ${c.dim} set OPENROUTER_API_KEY=sk-or-...${c.reset}`);
1717
- } else {
1718
- console.log(` ${c.dim}export ANTHROPIC_API_KEY=sk-ant-...${c.reset}`);
1719
- console.log(` ${c.dim}export OPENAI_API_KEY=sk-...${c.reset}`);
1720
- console.log(` ${c.dim}export OPENROUTER_API_KEY=sk-or-...${c.reset} ${c.dim}(200+ models, set OPENROUTER_MODEL to pick)${c.reset}`);
1721
- }
1736
+ console.log(` ${c.bold}For deep audits,${c.reset} set an LLM provider (any one):`);
1737
+ console.log(` ${c.dim}ANTHROPIC_API_KEY Anthropic Claude (recommended)${c.reset}`);
1738
+ console.log(` ${c.dim}OPENAI_API_KEY OpenAI GPT-4o${c.reset}`);
1739
+ console.log(` ${c.dim}OPENROUTER_API_KEY 200+ models (+ OPENROUTER_MODEL)${c.reset}`);
1740
+ console.log(` ${c.dim}OLLAMA_MODEL Local Ollama (+ OLLAMA_HOST)${c.reset}`);
1741
+ console.log(` ${c.dim}LLM_API_URL Any OpenAI-compatible API (+ LLM_API_KEY, LLM_MODEL)${c.reset}`);
1742
+ console.log();
1743
+ console.log(` ${c.dim}Run ${c.cyan}agentaudit status${c.dim} to check your configured providers.${c.reset}`);
1722
1744
  console.log();
1723
1745
  console.log(` ${c.bold}Or use as MCP server${c.reset} in Cursor/Claude ${c.dim}(no extra API key needed):${c.reset}`);
1724
1746
  console.log(` ${c.dim}Add to your MCP config:${c.reset}`);
@@ -1746,10 +1768,16 @@ async function main() {
1746
1768
  openaiKey: process.env.OPENAI_API_KEY,
1747
1769
  openrouterKey: process.env.OPENROUTER_API_KEY,
1748
1770
  };
1771
+ const ollamaHost = process.env.OLLAMA_HOST || 'http://localhost:11434';
1772
+ const ollamaModel = process.env.OLLAMA_MODEL;
1773
+ const customUrl = process.env.LLM_API_URL;
1774
+
1749
1775
  const checks = [
1750
1776
  { name: 'Anthropic', env: 'ANTHROPIC_API_KEY', key: keys.anthropicKey, testUrl: 'https://api.anthropic.com/v1/messages', testHeaders: (k) => ({ 'x-api-key': k, 'anthropic-version': '2023-06-01', 'content-type': 'application/json' }), testBody: JSON.stringify({ model: 'claude-sonnet-4-20250514', max_tokens: 1, messages: [{ role: 'user', content: 'hi' }] }) },
1751
1777
  { name: 'OpenAI', env: 'OPENAI_API_KEY', key: keys.openaiKey, testUrl: 'https://api.openai.com/v1/models', testHeaders: (k) => ({ 'Authorization': `Bearer ${k}` }), testBody: null },
1752
1778
  { name: 'OpenRouter', env: 'OPENROUTER_API_KEY', key: keys.openrouterKey, testUrl: 'https://openrouter.ai/api/v1/models', testHeaders: (k) => ({ 'Authorization': `Bearer ${k}` }), testBody: null },
1779
+ { name: 'Ollama', env: 'OLLAMA_MODEL', key: ollamaModel, testUrl: `${ollamaHost}/api/tags`, testHeaders: () => ({}), testBody: null },
1780
+ { name: 'Custom', env: 'LLM_API_URL', key: customUrl, testUrl: customUrl ? `${customUrl.replace(/\/$/, '')}/models` : null, testHeaders: (k) => process.env.LLM_API_KEY ? ({ 'Authorization': `Bearer ${process.env.LLM_API_KEY}` }) : {}, testBody: null },
1753
1781
  ];
1754
1782
 
1755
1783
  for (const p of checks) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "agentaudit",
3
- "version": "3.9.18",
3
+ "version": "3.9.19",
4
4
  "description": "Security scanner for AI packages — MCP server + CLI",
5
5
  "type": "module",
6
6
  "bin": {