@hungpg/skill-audit 0.1.1 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -89,9 +89,11 @@ Feeds are cached locally with automatic freshness checks:
89
89
 
90
90
  | Source | Update Frequency | Cache Lifetime |
91
91
  |--------|------------------|----------------|
92
- | CISA KEV | Daily | 7 days |
93
- | FIRST EPSS | Daily | 7 days |
92
+ | CISA KEV | Daily | 1 day |
93
+ | NIST NVD | Daily | 1 day |
94
+ | FIRST EPSS | Daily | 3 days |
94
95
  | OSV.dev | On-query | 7 days |
96
+ | GHSA | On-query | 3 days |
95
97
 
96
98
  **Automatic updates:**
97
99
  - Runs on `npm install` via `postinstall` hook
@@ -100,6 +102,20 @@ Feeds are cached locally with automatic freshness checks:
100
102
 
101
103
  **Stale cache warning:** Audit output warns if feeds are >3 days old.
102
104
 
105
+ ### NVD Synchronization
106
+
107
+ The `--update-db` command fetches CVEs modified in the last 24 hours only.
108
+ For initial setup or after extended offline periods, run multiple times to build historical data:
109
+
110
+ ```bash
111
+ # Multiple updates to build historical data
112
+ skill-audit --update-db
113
+ skill-audit --update-db
114
+ skill-audit --update-db
115
+ ```
116
+
117
+ Note: NVD API rate limits apply (5 requests/30 sec without API key). Set `NVD_API_KEY` environment variable for 50 requests/30 sec.
118
+
103
119
  ## Trust Sources
104
120
 
105
121
  1. Static pattern matching for known attack vectors
package/SKILL.md CHANGED
@@ -1,6 +1,6 @@
1
1
  ---
2
2
  name: skill-audit
3
- description: This skill should be used when the user asks to "audit AI agent skills for security vulnerabilities", "evaluate third-party skills before installing", "check for prompt injection or secrets leakage", "scan skills for code execution risks", "validate skills against Agent Skills specification", or "assess skill security posture with CVE/GHSA/KEV/EPSS intelligence".
3
+ description: This skill should be used when the user asks to "audit AI agent skills for security vulnerabilities", "evaluate third-party skills before installing", "check for prompt injection or secrets leakage", "scan skills for code execution risks", "validate skills against Agent Skills specification", or "assess skill security posture with CVE/GHSA/KEV/EPSS/NVD intelligence".
4
4
  license: MIT
5
5
  compatibility: Node.js 18+ with npm or yarn
6
6
  metadata:
@@ -75,6 +75,8 @@ Full security audit including:
75
75
  Pulls latest vulnerability intelligence:
76
76
  - CISA KEV (Known Exploited Vulnerabilities)
77
77
  - FIRST EPSS (Exploit Prediction Scoring) - via api.first.org/data/v1
78
+ - NIST NVD (National Vulnerability Database) - CVSS scores, CWE mappings
79
+ - GitHub Security Advisories (GHSA) - ecosystem-specific advisories
78
80
  - OSV.dev vulnerabilities
79
81
 
80
82
  Caches to `.cache/skill-audit/feeds/` for offline use.
@@ -148,7 +150,7 @@ npx skill-audit -g -o ./audit-report.json
148
150
  npx skill-audit -g -t 3.0
149
151
 
150
152
  # Update intelligence feeds
151
- npx skill-audit --update-db --source kev epss
153
+ npx skill-audit --update-db --source kev epss nvd
152
154
 
153
155
  # Audit project-level skills only
154
156
  npx skill-audit -p --mode audit -v
@@ -164,7 +166,7 @@ Found 3 skills
164
166
  Safe: 1 | Risky: 1 | Dangerous: 1 | Malicious: 0
165
167
  Skills with spec issues: 1 | Security issues: 2
166
168
 
167
- ⚠️ Vulnerability DB is stale (4.2 days for KEV, 5.1 days for EPSS)
169
+ ⚠️ Vulnerability DB is stale (4.2 days for KEV, 5.1 days for EPSS, 2.0 days for NVD)
168
170
  Run: npx skill-audit --update-db
169
171
 
170
172
  ❌ 1 skills exceed threshold 3.0
@@ -189,8 +191,9 @@ Three-layer validation approach:
189
191
  - Maps to OWASP Agentic Top 10
190
192
 
191
193
  3. **Intelligence Service**
192
- - Caches CVE/GHSA/KEV/EPSS data
194
+ - Caches CVE/GHSA/KEV/EPSS/NVD data
193
195
  - Native HTTP/fetch (no shell dependencies)
196
+ - Differentiated cache lifetimes by source (KEV/NVD: 1 day, EPSS/GHSA: 3 days, OSV: 7 days)
194
197
 
195
198
  ## Related Skills
196
199
 
@@ -216,6 +219,8 @@ Three-layer validation approach:
216
219
  - **[OWASP AI Security Top 10](https://owasp.org/www-project-top-ten.html)** - ASI01-ASI10 threat categories
217
220
  - **[CISA KEV Catalog](https://www.cisa.gov/known-exploited-vulnerabilities-catalog)** - Actively exploited vulnerabilities
218
221
  - **[FIRST EPSS](https://www.first.org/epss/)** - Exploit Prediction Scoring System
222
+ - **[NIST NVD](https://nvd.nist.gov/)** - National Vulnerability Database (official CVE database)
223
+ - **[GitHub Security Advisories](https://github.com/advisories)** - GHSA vulnerability database
219
224
  - **[OSV.dev](https://osv.dev/)** - Open Source Vulnerability database
220
225
 
221
226
  ### Intelligence Cache
@@ -223,5 +228,7 @@ Three-layer validation approach:
223
228
  | Source | Update Frequency | Max Cache Age | Warning Threshold |
224
229
  |--------|-----------------|---------------|-------------------|
225
230
  | CISA KEV | Daily | 1 day | 3 days |
231
+ | NIST NVD | Daily | 1 day | 3 days |
232
+ | GitHub GHSA | 3 days | 3 days | 3 days |
226
233
  | FIRST EPSS | 3-day cycle | 3 days | 3 days |
227
234
  | OSV.dev | On-query | 7 days | 3 days |
package/dist/deps.js CHANGED
@@ -14,6 +14,52 @@ const OSV_ECOSYSTEMS = {
14
14
  'RubyGems': 'ruby',
15
15
  'Packagist': 'php',
16
16
  'Pub': 'dart',
17
+ 'NuGet': 'dotnet',
18
+ 'Hex': 'elixir',
19
+ 'ConanCenter': 'cpp',
20
+ 'Bioconductor': 'r',
21
+ 'SwiftURL': 'swift',
22
+ };
23
+ // Supported lockfile patterns and their ecosystems
24
+ const LOCKFILE_PATTERNS = {
25
+ // JavaScript/TypeScript
26
+ 'package-lock.json': { ecosystem: 'npm', parser: 'json' },
27
+ 'yarn.lock': { ecosystem: 'npm', parser: 'yarn' },
28
+ 'pnpm-lock.yaml': { ecosystem: 'npm', parser: 'yaml' },
29
+ 'bun.lockb': { ecosystem: 'npm', parser: 'binary' },
30
+ // Python
31
+ 'requirements.txt': { ecosystem: 'PyPI', parser: 'text' },
32
+ 'Pipfile.lock': { ecosystem: 'PyPI', parser: 'json' },
33
+ 'poetry.lock': { ecosystem: 'PyPI', parser: 'toml' },
34
+ 'pdm.lock': { ecosystem: 'PyPI', parser: 'toml' },
35
+ 'uv.lock': { ecosystem: 'PyPI', parser: 'toml' },
36
+ 'pylock.toml': { ecosystem: 'PyPI', parser: 'toml' },
37
+ // Rust
38
+ 'Cargo.lock': { ecosystem: 'crates.io', parser: 'toml' },
39
+ // Ruby
40
+ 'Gemfile.lock': { ecosystem: 'RubyGems', parser: 'text' },
41
+ 'gems.locked': { ecosystem: 'RubyGems', parser: 'text' },
42
+ // PHP
43
+ 'composer.lock': { ecosystem: 'Packagist', parser: 'json' },
44
+ // Java
45
+ 'pom.xml': { ecosystem: 'Maven', parser: 'xml' },
46
+ 'buildscript-gradle.lockfile': { ecosystem: 'Maven', parser: 'text' },
47
+ 'gradle.lockfile': { ecosystem: 'Maven', parser: 'text' },
48
+ // Go
49
+ 'go.mod': { ecosystem: 'Go', parser: 'text' },
50
+ 'go.sum': { ecosystem: 'Go', parser: 'text' },
51
+ // .NET
52
+ 'packages.lock.json': { ecosystem: 'NuGet', parser: 'json' },
53
+ 'deps.json': { ecosystem: 'NuGet', parser: 'json' },
54
+ 'packages.config': { ecosystem: 'NuGet', parser: 'xml' },
55
+ // Dart
56
+ 'pubspec.lock': { ecosystem: 'Pub', parser: 'yaml' },
57
+ // Elixir
58
+ 'mix.lock': { ecosystem: 'Hex', parser: 'elixir' },
59
+ // C/C++
60
+ 'conan.lock': { ecosystem: 'ConanCenter', parser: 'text' },
61
+ // R
62
+ 'renv.lock': { ecosystem: 'Bioconductor', parser: 'json' },
17
63
  };
18
64
  // Check if a scanner is available
19
65
  function isScannerAvailable(scanner) {
@@ -251,62 +297,214 @@ function extractPackagesFromLockfiles(resolvedPath) {
251
297
  const packages = [];
252
298
  try {
253
299
  const files = readdirSync(resolvedPath);
254
- // Parse package-lock.json
255
- const pkgLock = files.find(f => f === 'package-lock.json');
256
- if (pkgLock) {
257
- const content = JSON.parse(readFileSync(join(resolvedPath, pkgLock), 'utf-8'));
258
- if (content.packages) {
259
- for (const [path, pkg] of Object.entries(content.packages)) {
300
+ // Iterate through all supported lockfile patterns
301
+ for (const [filename, config] of Object.entries(LOCKFILE_PATTERNS)) {
302
+ const lockfile = files.find(f => f === filename);
303
+ if (!lockfile)
304
+ continue;
305
+ const filepath = join(resolvedPath, lockfile);
306
+ const content = readFileSync(filepath, 'utf-8');
307
+ try {
308
+ switch (config.parser) {
309
+ case 'json':
310
+ parseJSONLockfile(content, config.ecosystem, packages);
311
+ break;
312
+ case 'yaml':
313
+ parseYAMLLockfile(content, config.ecosystem, packages);
314
+ break;
315
+ case 'toml':
316
+ parseTOMLLockfile(content, config.ecosystem, packages);
317
+ break;
318
+ case 'text':
319
+ parseTextLockfile(content, config.ecosystem, packages, filename);
320
+ break;
321
+ // Binary and XML parsers would require additional dependencies
322
+ // For now, skip binary files and use basic XML parsing
323
+ }
324
+ }
325
+ catch (e) {
326
+ console.warn(`Failed to parse ${filename}:`, e);
327
+ }
328
+ }
329
+ }
330
+ catch (e) {
331
+ // Ignore top-level errors
332
+ }
333
+ return packages;
334
+ }
335
+ // Parse JSON lockfiles (package-lock.json, Pipfile.lock, composer.lock, etc.)
336
+ function parseJSONLockfile(content, ecosystem, packages) {
337
+ const data = JSON.parse(content);
338
+ // package-lock.json format (object with packages)
339
+ if (data.packages && typeof data.packages === 'object' && !Array.isArray(data.packages)) {
340
+ for (const [path, pkg] of Object.entries(data.packages)) {
341
+ const p = pkg;
342
+ if (p.version && path !== '') {
343
+ const name = p.name || path.split('node_modules/').pop()?.split('/')[0];
344
+ if (name) {
345
+ packages.push({ name, version: p.version.replace(/^\^|~/, ''), ecosystem });
346
+ }
347
+ }
348
+ }
349
+ }
350
+ // Pipfile.lock format
351
+ if (data.default || data.develop) {
352
+ for (const section of ['default', 'develop']) {
353
+ if (data[section]) {
354
+ for (const [name, pkg] of Object.entries(data[section])) {
260
355
  const p = pkg;
261
- if (p.version && path !== '') {
262
- // Extract package name from path
263
- const name = path.split('node_modules/').pop()?.split('/')[0];
264
- if (name) {
265
- packages.push({ name, version: p.version.replace(/^\^|~/, ''), ecosystem: 'npm' });
266
- }
356
+ if (p.version) {
357
+ packages.push({ name: name.toLowerCase(), version: p.version.replace(/^[=<>!~]+/, ''), ecosystem });
267
358
  }
268
359
  }
269
360
  }
270
361
  }
271
- // Parse requirements.txt
272
- const reqTxt = files.find(f => f === 'requirements.txt');
273
- if (reqTxt) {
274
- const content = readFileSync(join(resolvedPath, reqTxt), 'utf-8');
275
- for (const line of content.split('\n')) {
276
- const match = line.match(/^([a-zA-Z0-9_-]+)([=<>!~]+)(.+)$/);
362
+ }
363
+ // composer.lock format (array of packages)
364
+ if (Array.isArray(data.packages)) {
365
+ for (const pkg of data.packages) {
366
+ if (pkg.name && pkg.version) {
367
+ packages.push({ name: pkg.name, version: pkg.version.replace(/^[=<>!~v]+/, ''), ecosystem });
368
+ }
369
+ }
370
+ }
371
+ // renv.lock format
372
+ if (data.Packages) {
373
+ for (const [name, pkg] of Object.entries(data.Packages)) {
374
+ const p = pkg;
375
+ if (p.Version) {
376
+ packages.push({ name, version: p.Version, ecosystem });
377
+ }
378
+ }
379
+ }
380
+ }
381
+ // Parse YAML lockfiles (yarn.lock, pubspec.lock, pnpm-lock.yaml)
382
+ function parseYAMLLockfile(content, ecosystem, packages) {
383
+ // Simple YAML parsing without external dependency
384
+ // For production, consider using a YAML parser library
385
+ const lines = content.split('\n');
386
+ let currentPackage = '';
387
+ for (const line of lines) {
388
+ // yarn.lock format: "package@version":
389
+ const yarnMatch = line.match(/^"?([^@"]+)@([^"]+)":/);
390
+ if (yarnMatch) {
391
+ packages.push({ name: yarnMatch[1], version: yarnMatch[2].replace(/^[^0-9]*/, ''), ecosystem });
392
+ continue;
393
+ }
394
+ // pubspec.lock format
395
+ const pubMatch = line.match(/^\s+name:\s*(.+)$/);
396
+ if (pubMatch) {
397
+ currentPackage = pubMatch[1].trim();
398
+ continue;
399
+ }
400
+ const pubVersion = line.match(/^\s+version:\s*"?(.+)"?$/);
401
+ if (pubVersion && currentPackage) {
402
+ packages.push({ name: currentPackage, version: pubVersion[1], ecosystem });
403
+ currentPackage = '';
404
+ }
405
+ }
406
+ }
407
+ // Parse TOML lockfiles (Cargo.lock, poetry.lock, etc.)
408
+ function parseTOMLLockfile(content, ecosystem, packages) {
409
+ // Simple TOML parsing without external dependency
410
+ const lines = content.split('\n');
411
+ let currentPackage = '';
412
+ for (const line of lines) {
413
+ // Cargo.lock format: [[package]]
414
+ if (line.startsWith('[[')) {
415
+ currentPackage = '';
416
+ continue;
417
+ }
418
+ const nameMatch = line.match(/^name\s*=\s*"(.+)"$/);
419
+ if (nameMatch) {
420
+ currentPackage = nameMatch[1];
421
+ continue;
422
+ }
423
+ const versionMatch = line.match(/^version\s*=\s*"(.+)"$/);
424
+ if (versionMatch && currentPackage) {
425
+ packages.push({ name: currentPackage, version: versionMatch[1], ecosystem });
426
+ }
427
+ }
428
+ }
429
+ // Parse text-based lockfiles (requirements.txt, Gemfile.lock, go.mod, etc.)
430
+ function parseTextLockfile(content, ecosystem, packages, filename) {
431
+ const lines = content.split('\n');
432
+ // requirements.txt format
433
+ if (filename === 'requirements.txt') {
434
+ for (const line of lines) {
435
+ const match = line.match(/^([a-zA-Z0-9_-]+)([=<>!~]+)(.+)$/);
436
+ if (match) {
437
+ packages.push({ name: match[1], version: match[3].trim(), ecosystem });
438
+ }
439
+ }
440
+ return;
441
+ }
442
+ // Gemfile.lock format
443
+ if (filename === 'Gemfile.lock') {
444
+ let inSpecs = false;
445
+ for (const line of lines) {
446
+ if (line.includes('specs:')) {
447
+ inSpecs = true;
448
+ continue;
449
+ }
450
+ if (inSpecs && line.startsWith(' ')) {
451
+ const match = line.match(/^\s+([a-zA-Z0-9_-]+)\s+\(([^)]+)\)/);
277
452
  if (match) {
278
- packages.push({ name: match[1], version: match[3].trim(), ecosystem: 'PyPI' });
453
+ packages.push({ name: match[1], version: match[2], ecosystem });
279
454
  }
280
455
  }
456
+ if (inSpecs && line.trim() && !line.startsWith(' ')) {
457
+ inSpecs = false;
458
+ }
281
459
  }
282
- // Parse go.mod
283
- const goMod = files.find(f => f === 'go.mod');
284
- if (goMod) {
285
- const content = readFileSync(join(resolvedPath, goMod), 'utf-8');
286
- for (const line of content.split('\n')) {
287
- const match = line.match(/^\s+([a-zA-Z0-9\/]+)\s+v?(.+)$/);
288
- if (match && !match[1].startsWith('gopkg.in') && !match[1].startsWith('github.com/')) {
289
- packages.push({ name: match[1], version: match[2].replace(/^v/, ''), ecosystem: 'Go' });
460
+ return;
461
+ }
462
+ // go.mod format
463
+ if (filename === 'go.mod') {
464
+ let inRequire = false;
465
+ for (const line of lines) {
466
+ if (line.startsWith('require (')) {
467
+ inRequire = true;
468
+ continue;
469
+ }
470
+ if (inRequire) {
471
+ if (line === ')') {
472
+ inRequire = false;
473
+ continue;
474
+ }
475
+ const match = line.match(/^\s*([a-zA-Z0-9\/]+)\s+v?(.+)$/);
476
+ if (match) {
477
+ packages.push({ name: match[1], version: match[2].replace(/^v/, ''), ecosystem });
290
478
  }
291
479
  }
480
+ // Single-line require
481
+ const singleMatch = line.match(/^require\s+([a-zA-Z0-9\/]+)\s+v?(.+)$/);
482
+ if (singleMatch) {
483
+ packages.push({ name: singleMatch[1], version: singleMatch[2].replace(/^v/, ''), ecosystem });
484
+ }
292
485
  }
293
- // Parse Cargo.lock
294
- const cargoLock = files.find(f => f === 'Cargo.lock');
295
- if (cargoLock) {
296
- const content = JSON.parse(readFileSync(join(resolvedPath, cargoLock), 'utf-8'));
297
- if (content.package) {
298
- for (const pkg of content.package) {
299
- if (pkg.name && pkg.version) {
300
- packages.push({ name: pkg.name, version: pkg.version, ecosystem: 'crates.io' });
301
- }
302
- }
486
+ return;
487
+ }
488
+ // go.sum format
489
+ if (filename === 'go.sum') {
490
+ for (const line of lines) {
491
+ const match = line.match(/^([a-zA-Z0-9\/]+)\s+v?([^\/\s]+)\//);
492
+ if (match) {
493
+ packages.push({ name: match[1], version: match[2].replace(/^v/, ''), ecosystem });
303
494
  }
304
495
  }
496
+ return;
305
497
  }
306
- catch (e) {
307
- // Ignore parse errors
498
+ // gradle.lockfile format
499
+ if (filename.includes('gradle.lockfile')) {
500
+ for (const line of lines) {
501
+ const match = line.match(/:([a-zA-Z0-9_-]+):([a-zA-Z0-9._-]+):([a-zA-Z0-9._-]+)/);
502
+ if (match) {
503
+ packages.push({ name: `${match[2]}:${match[3]}`, version: match[3], ecosystem });
504
+ }
505
+ }
506
+ return;
308
507
  }
309
- return packages;
310
508
  }
311
509
  export function scanDependencies(skillPath) {
312
510
  const findings = [];
package/dist/index.js CHANGED
@@ -5,12 +5,12 @@ import { auditSecurity } from "./security.js";
5
5
  import { validateSkillSpec } from "./spec.js";
6
6
  import { createGroupedAuditResult } from "./scoring.js";
7
7
  import { scanDependencies } from "./deps.js";
8
- import { getKEV, getEPSS, isCacheStale } from "./intel.js";
8
+ import { getKEV, getEPSS, getNVD, isCacheStale, downloadOfflineDB } from "./intel.js";
9
9
  import { writeFileSync } from "fs";
10
10
  // Build CLI - no subcommands, just options + action
11
11
  const program = new Command();
12
12
  program
13
- .name("skills-audit")
13
+ .name("skill-audit")
14
14
  .description("Security auditing CLI for AI agent skills")
15
15
  .version("0.1.0")
16
16
  .option("-g, --global", "Audit global skills only (default: true)")
@@ -23,11 +23,17 @@ program
23
23
  .option("--no-deps", "Skip dependency scanning (faster)")
24
24
  .option("--mode <mode>", "Audit mode: 'lint' (spec only) or 'audit' (full)", "audit")
25
25
  .option("--update-db", "Update advisory intelligence feeds")
26
- .option("--source <sources...>", "Sources for update-db: kev, epss, all", ["all"])
26
+ .option("--source <sources...>", "Sources for update-db: kev, epss, nvd, all", ["all"])
27
27
  .option("--strict", "Fail if feeds are stale")
28
- .option("--quiet", "Suppress non-error output");
28
+ .option("--quiet", "Suppress non-error output")
29
+ .option("--download-offline-db <dir>", "Download offline vulnerability databases to directory");
29
30
  program.parse(process.argv);
30
31
  const options = program.opts();
32
+ // Handle download-offline-db action
33
+ if (options.downloadOfflineDb) {
34
+ await downloadOfflineDB(options.downloadOfflineDb);
35
+ process.exit(0);
36
+ }
31
37
  // Handle update-db action
32
38
  if (options.updateDb) {
33
39
  await updateAdvisoryDB({ source: options.source, strict: options.strict });
@@ -75,7 +81,7 @@ reportGroupedResults(results, {
75
81
  mode
76
82
  });
77
83
  async function updateAdvisoryDB(opts) {
78
- const sources = opts.source.includes("all") ? ["kev", "epss"] : opts.source;
84
+ const sources = opts.source.includes("all") ? ["kev", "epss", "nvd"] : opts.source;
79
85
  const quiet = program.opts().quiet;
80
86
  if (!quiet) {
81
87
  console.log("📥 Updating advisory intelligence feeds...\n");
@@ -98,6 +104,12 @@ async function updateAdvisoryDB(opts) {
98
104
  console.log(` ✓ EPSS: ${result.findings.length} scores cached (stale: ${result.stale})`);
99
105
  }
100
106
  }
107
+ else if (source === "nvd") {
108
+ const result = await getNVD();
109
+ if (!quiet) {
110
+ console.log(` ✓ NVD: ${result.findings.length} CVEs cached (stale: ${result.stale})`);
111
+ }
112
+ }
101
113
  }
102
114
  catch (e) {
103
115
  console.error(` ✗ Failed to fetch ${source}:`, e);
@@ -159,8 +171,16 @@ function reportGroupedResults(results, options) {
159
171
  // Check cache freshness and warn if stale
160
172
  const kevStale = isCacheStale("kev");
161
173
  const epssStale = isCacheStale("epss");
162
- if (!options.json && (kevStale.warn || epssStale.warn)) {
163
- console.log(`\n⚠️ Vulnerability DB is stale (${kevStale.age?.toFixed(1)} days for KEV, ${epssStale.age?.toFixed(1)} days for EPSS)`);
174
+ const nvdStale = isCacheStale("nvd");
175
+ if (!options.json && (kevStale.warn || epssStale.warn || nvdStale.warn)) {
176
+ const ages = [];
177
+ if (kevStale.age)
178
+ ages.push(`${kevStale.age.toFixed(1)} days for KEV`);
179
+ if (epssStale.age)
180
+ ages.push(`${epssStale.age.toFixed(1)} days for EPSS`);
181
+ if (nvdStale.age)
182
+ ages.push(`${nvdStale.age.toFixed(1)} days for NVD`);
183
+ console.log(`\n⚠️ Vulnerability DB is stale (${ages.join(", ")})`);
164
184
  console.log(` Run: npx skill-audit --update-db`);
165
185
  }
166
186
  if (threshold !== undefined) {
package/dist/intel.js CHANGED
@@ -8,6 +8,8 @@ const METRICS_FILE = join(PACKAGE_ROOT, ".cache/skill-audit/metrics.json");
8
8
  // Cache configuration - differentiated by source update frequency
9
9
  const MAX_CACHE_AGE_DAYS = {
10
10
  kev: 1, // Daily updates - critical for actively exploited vulns
11
+ nvd: 1, // Daily - official NVD database updates frequently
12
+ ghsa: 3, // 3 days - GitHub Security Advisories
11
13
  epss: 3, // Matches FIRST.org update cycle
12
14
  osv: 7 // Stable database - weekly acceptable
13
15
  };
@@ -15,6 +17,21 @@ const WARN_CACHE_AGE_DAYS = 3;
15
17
  const FETCH_TIMEOUT_MS = 30000; // 30 seconds
16
18
  const MAX_RETRIES = 3;
17
19
  const RETRY_DELAY_MS = 1000; // Base delay for exponential backoff
20
+ // Map internal ecosystem names to GitHub GraphQL enum values
21
+ const GHSA_ECOSYSTEM_MAP = {
22
+ 'npm': 'NPM',
23
+ 'PyPI': 'PIP',
24
+ 'pypi': 'PIP',
25
+ 'crates.io': 'RUST',
26
+ 'RubyGems': 'RUBYGEMS',
27
+ 'Maven': 'MAVEN',
28
+ 'Packagist': 'COMPOSER',
29
+ 'Go': 'GO',
30
+ 'NuGet': 'NUGET',
31
+ 'Pub': 'PUB',
32
+ 'Hex': 'ERLANG',
33
+ 'SwiftURL': 'SWIFT',
34
+ };
18
35
  /**
19
36
  * Ensure cache directory exists
20
37
  */
@@ -101,6 +118,12 @@ function recordFetchResult(source, count, durationMs, error) {
101
118
  else if (source === 'epss') {
102
119
  metrics.epssCount = count;
103
120
  }
121
+ else if (source === 'nvd') {
122
+ metrics.nvdCount = count;
123
+ }
124
+ else if (source === 'ghsa') {
125
+ metrics.ghsaCount = count;
126
+ }
104
127
  if (error) {
105
128
  metrics.errors.push(`${source}: ${error}`);
106
129
  // Keep only last 10 errors
@@ -293,7 +316,7 @@ export async function queryGHSA(ecosystem, packageName) {
293
316
  }
294
317
  `,
295
318
  variables: {
296
- ecosystem: ecosystem.toUpperCase(),
319
+ ecosystem: GHSA_ECOSYSTEM_MAP[ecosystem] || ecosystem.toUpperCase(),
297
320
  package: packageName
298
321
  }
299
322
  })
@@ -386,6 +409,80 @@ export async function fetchEPSS() {
386
409
  return [];
387
410
  }
388
411
  }
412
+ /**
413
+ * Fetch NIST NVD (National Vulnerability Database)
414
+ * Uses NVD API v2.0 with CVSS scoring
415
+ * API: https://nvd.nist.gov/developers/vulnerabilities
416
+ */
417
+ export async function fetchNVD() {
418
+ const startTime = Date.now();
419
+ const apiKey = process.env.NVD_API_KEY;
420
+ // Calculate date range for last 24 hours
421
+ const now = new Date();
422
+ const yesterday = new Date(now.getTime() - 24 * 60 * 60 * 1000);
423
+ // NVD API requires ISO8601 format without milliseconds
424
+ const formatDate = (date) => date.toISOString().replace(/\.\d{3}Z$/, 'Z');
425
+ const lastModStartDate = formatDate(yesterday);
426
+ const lastModEndDate = formatDate(now);
427
+ const url = `https://services.nvd.nist.gov/rest/json/cves/2.0?lastModStartDate=${lastModStartDate}&lastModEndDate=${lastModEndDate}`;
428
+ try {
429
+ const headers = {
430
+ 'User-Agent': 'skill-audit/0.1.0 (Vulnerability Intelligence Scanner)'
431
+ };
432
+ if (apiKey) {
433
+ headers['apiKey'] = apiKey;
434
+ }
435
+ const response = await fetchWithRetry(url, FETCH_TIMEOUT_MS, { headers });
436
+ const data = await response.json();
437
+ if (!data.vulnerabilities) {
438
+ recordFetchResult('nvd', 0, Date.now() - startTime, 'No vulnerabilities in response');
439
+ return [];
440
+ }
441
+ const records = data.vulnerabilities.map(v => {
442
+ // Extract CVSS score (prefer v3.1, fallback to v3.0)
443
+ let cvss;
444
+ let cvssVector;
445
+ let severity;
446
+ if (v.cve.metrics?.cvssMetricV31?.[0]?.cvssData) {
447
+ const cvss31 = v.cve.metrics.cvssMetricV31[0].cvssData;
448
+ cvss = cvss31.baseScore;
449
+ cvssVector = cvss31.vectorString;
450
+ severity = cvss31.baseSeverity;
451
+ }
452
+ else if (v.cve.metrics?.cvssMetricV30?.[0]?.cvssData) {
453
+ const cvss30 = v.cve.metrics.cvssMetricV30[0].cvssData;
454
+ cvss = cvss30.baseScore;
455
+ cvssVector = cvss30.vectorString;
456
+ severity = cvss30.baseSeverity;
457
+ }
458
+ // Extract CWE
459
+ const cwe = v.cve.weaknesses?.[0]?.description?.map(d => d.value) || [];
460
+ // Extract description as summary
461
+ const summary = v.cve.descriptions?.find(d => d.lang === 'en')?.value;
462
+ return {
463
+ id: v.cve.id,
464
+ aliases: [v.cve.id],
465
+ source: "NVD",
466
+ severity,
467
+ cvss,
468
+ cvssVector,
469
+ cwe,
470
+ published: v.cve.published,
471
+ modified: v.cve.lastModified,
472
+ summary,
473
+ references: v.cve.references?.map(r => r.url) || []
474
+ };
475
+ });
476
+ recordFetchResult('nvd', records.length, Date.now() - startTime);
477
+ return records;
478
+ }
479
+ catch (error) {
480
+ const errorMsg = error instanceof Error ? error.message : 'Unknown error';
481
+ recordFetchResult('nvd', 0, Date.now() - startTime, errorMsg);
482
+ console.error(`NVD fetch failed:`, error);
483
+ return [];
484
+ }
485
+ }
389
486
  /**
390
487
  * Query vulnerability intelligence for a package
391
488
  */
@@ -443,6 +540,48 @@ export async function getEPSS() {
443
540
  warn
444
541
  };
445
542
  }
543
+ /**
544
+ * Get NVD vulnerabilities (enriched)
545
+ */
546
+ export async function getNVD() {
547
+ const { stale, age, warn } = isCacheStale("nvd");
548
+ let records = loadFromCache("nvd");
549
+ if (records.length === 0 || stale) {
550
+ records = await fetchNVD();
551
+ if (records.length > 0) {
552
+ saveToCache("nvd", records);
553
+ }
554
+ }
555
+ return {
556
+ findings: records,
557
+ cacheAge: age,
558
+ stale,
559
+ warn
560
+ };
561
+ }
562
+ /**
563
+ * Get GHSA advisories (enriched)
564
+ */
565
+ export async function getGHSA() {
566
+ const { stale, age, warn } = isCacheStale("ghsa");
567
+ let records = loadFromCache("ghsa");
568
+ if (records.length === 0 || stale) {
569
+ // GHSA doesn't have a bulk feed - would need to query per-package
570
+ // For now, return empty - GHSA integration is via queryGHSA() per-package
571
+ return {
572
+ findings: [],
573
+ cacheAge: age,
574
+ stale,
575
+ warn
576
+ };
577
+ }
578
+ return {
579
+ findings: records,
580
+ cacheAge: age,
581
+ stale,
582
+ warn
583
+ };
584
+ }
446
585
  /**
447
586
  * Merge advisory records by alias
448
587
  */
@@ -484,3 +623,71 @@ export function prioritizeRecords(records) {
484
623
  return 0;
485
624
  });
486
625
  }
626
+ /**
627
+ * Download offline vulnerability databases
628
+ * @param outputDir - Directory to save offline databases
629
+ * @returns Object with download statistics
630
+ */
631
+ export async function downloadOfflineDB(outputDir) {
632
+ const results = {
633
+ kev: { success: false, count: 0 },
634
+ epss: { success: false, count: 0 },
635
+ nvd: { success: false, count: 0 },
636
+ osv: { success: false, message: '' }
637
+ };
638
+ try {
639
+ // Ensure output directory exists
640
+ if (!existsSync(outputDir)) {
641
+ mkdirSync(outputDir, { recursive: true });
642
+ }
643
+ // Download KEV
644
+ console.log('📥 Downloading CISA KEV...');
645
+ const kevRecords = await fetchKEV();
646
+ if (kevRecords.length > 0) {
647
+ writeFileSync(join(outputDir, 'kev.json'), JSON.stringify({ fetchedAt: new Date().toISOString(), records: kevRecords }, null, 2));
648
+ results.kev = { success: true, count: kevRecords.length };
649
+ console.log(` ✓ KEV: ${kevRecords.length} vulnerabilities`);
650
+ }
651
+ // Download EPSS
652
+ console.log('📥 Downloading EPSS scores...');
653
+ const epssRecords = await fetchEPSS();
654
+ if (epssRecords.length > 0) {
655
+ writeFileSync(join(outputDir, 'epss.json'), JSON.stringify({ fetchedAt: new Date().toISOString(), records: epssRecords }, null, 2));
656
+ results.epss = { success: true, count: epssRecords.length };
657
+ console.log(` ✓ EPSS: ${epssRecords.length} scores`);
658
+ }
659
+ // Download NVD
660
+ console.log('📥 Downloading NIST NVD...');
661
+ const nvdRecords = await fetchNVD();
662
+ if (nvdRecords.length > 0) {
663
+ writeFileSync(join(outputDir, 'nvd.json'), JSON.stringify({ fetchedAt: new Date().toISOString(), records: nvdRecords }, null, 2));
664
+ results.nvd = { success: true, count: nvdRecords.length };
665
+ console.log(` ✓ NVD: ${nvdRecords.length} CVEs`);
666
+ }
667
+ // Note: OSV is query-based, not a bulk download
668
+ // Users would need to query OSV API per-package
669
+ results.osv = {
670
+ success: true,
671
+ message: 'OSV uses on-demand API queries (not bulk download). Use OSV CLI for offline scanning.'
672
+ };
673
+ console.log(' ℹ️ OSV: Query-based API (use --update-db for caching)');
674
+ // Save metadata
675
+ const metadata = {
676
+ downloadedAt: new Date().toISOString(),
677
+ sources: results,
678
+ cacheAges: {
679
+ kev: MAX_CACHE_AGE_DAYS.kev,
680
+ epss: MAX_CACHE_AGE_DAYS.epss,
681
+ nvd: MAX_CACHE_AGE_DAYS.nvd,
682
+ osv: MAX_CACHE_AGE_DAYS.osv
683
+ }
684
+ };
685
+ writeFileSync(join(outputDir, 'metadata.json'), JSON.stringify(metadata, null, 2));
686
+ console.log('\n✅ Offline databases downloaded to:', outputDir);
687
+ }
688
+ catch (error) {
689
+ console.error('❌ Download failed:', error);
690
+ results.osv.message = error instanceof Error ? error.message : 'Download error';
691
+ }
692
+ return results;
693
+ }
@@ -0,0 +1,67 @@
1
+ import { readFileSync, existsSync } from "fs";
2
+ import { join, dirname } from "path";
3
+ import { fileURLToPath } from "url";
4
+ const PACKAGE_ROOT = join(dirname(fileURLToPath(import.meta.url)), "..");
5
+ const RULES_DIR = join(PACKAGE_ROOT, "rules");
6
+ const DEFAULT_PATTERNS_FILE = join(RULES_DIR, "default-patterns.json");
7
+ /**
8
+ * Load patterns from JSON file
9
+ */
10
+ export function loadPatterns(patternsFile = DEFAULT_PATTERNS_FILE) {
11
+ if (!existsSync(patternsFile)) {
12
+ throw new Error(`Patterns file not found: ${patternsFile}`);
13
+ }
14
+ const content = readFileSync(patternsFile, "utf-8");
15
+ return JSON.parse(content);
16
+ }
17
+ /**
18
+ * Compile patterns to RegExp objects
19
+ */
20
+ export function compilePatterns(patterns) {
21
+ const compiled = new Map();
22
+ for (const [categoryKey, category] of Object.entries(patterns.categories)) {
23
+ const categoryPatterns = [];
24
+ for (const rule of category.patterns) {
25
+ try {
26
+ const regex = new RegExp(rule.pattern, rule.flags || "i");
27
+ categoryPatterns.push({
28
+ regex,
29
+ id: rule.id,
30
+ severity: rule.severity,
31
+ message: rule.message,
32
+ category: categoryKey
33
+ });
34
+ }
35
+ catch (error) {
36
+ console.error(`Failed to compile pattern ${rule.id}:`, error);
37
+ }
38
+ }
39
+ compiled.set(categoryKey, categoryPatterns);
40
+ }
41
+ return compiled;
42
+ }
43
+ /**
44
+ * Load and compile patterns in one step
45
+ */
46
+ export function loadAndCompile(patternsFile) {
47
+ const patterns = loadPatterns(patternsFile);
48
+ return compilePatterns(patterns);
49
+ }
50
+ /**
51
+ * Get pattern metadata (version, update date)
52
+ */
53
+ export function getPatternMetadata(patternsFile = DEFAULT_PATTERNS_FILE) {
54
+ try {
55
+ const patterns = loadPatterns(patternsFile);
56
+ return { version: patterns.version, updated: patterns.updated };
57
+ }
58
+ catch {
59
+ return { version: "unknown", updated: "unknown" };
60
+ }
61
+ }
62
+ /**
63
+ * Check if patterns file exists
64
+ */
65
+ export function hasPatternsFile(patternsFile = DEFAULT_PATTERNS_FILE) {
66
+ return existsSync(patternsFile);
67
+ }
package/dist/security.js CHANGED
@@ -1,6 +1,7 @@
1
1
  import { readFileSync } from "fs";
2
2
  import { basename, extname } from "path";
3
3
  import { resolveSkillPath, getSkillFiles } from "./discover.js";
4
+ import { loadAndCompile, hasPatternsFile, getPatternMetadata } from "./patterns.js";
4
5
  /**
5
6
  * Phase 1 - Layer 2: Security Auditor
6
7
  *
@@ -13,7 +14,37 @@ import { resolveSkillPath, getSkillFiles } from "./discover.js";
13
14
  * - ASI04: Secrets / Supply Chain
14
15
  * - ASI05: Code Execution
15
16
  * - ASI09: Behavioral Manipulation
17
+ *
18
+ * Pattern sources:
19
+ * 1. External patterns file (rules/default-patterns.json) - preferred
20
+ * 2. Hardcoded fallback patterns - used if external file missing
21
+ */
22
+ // ============================================================
23
+ // Pattern Loading
24
+ // ============================================================
25
// Lazily-initialized, module-level pattern state shared by the audit code.
let compiledPatterns = null;
let patternMetadata = { version: "unknown", updated: "unknown" };
/**
 * Return the compiled pattern map, loading it on first use and caching it
 * for every later call. Prefers the external rules file; on any load
 * failure (or when the file is absent) it caches an empty Map, which the
 * callers treat as "fall back to the hardcoded pattern tables".
 *
 * @returns {Map} Category key -> compiled rules (possibly empty).
 */
function initPatterns() {
    // Already initialized (either a populated map or the empty fallback).
    if (compiledPatterns !== null) {
        return compiledPatterns;
    }
    try {
        if (hasPatternsFile()) {
            compiledPatterns = loadAndCompile();
            patternMetadata = getPatternMetadata();
            return compiledPatterns;
        }
    }
    catch (error) {
        console.warn("Failed to load external patterns, using hardcoded fallback:", error);
    }
    // Empty map signals "no external patterns" to auditSecurity.
    compiledPatterns = new Map();
    return compiledPatterns;
}
17
48
  // ============================================================
18
49
  // PROMPT INJECTION PATTERNS (ASI01 - Goal Hijacking)
19
50
  // ============================================================
@@ -164,13 +195,19 @@ function getASIXXFromId(id) {
164
195
  function scanContent(content, file, patterns) {
165
196
  const findings = [];
166
197
  const lines = content.split("\n");
167
- for (const { pattern, id, severity = "medium", message } of patterns) {
198
+ for (const patternDef of patterns) {
199
+ const regex = 'regex' in patternDef ? patternDef.regex : patternDef.pattern;
200
+ const id = patternDef.id;
201
+ const severity = 'severity' in patternDef ? patternDef.severity : patternDef.severity || "medium";
202
+ const message = patternDef.message;
203
+ const category = 'category' in patternDef ? patternDef.category : getCategoryFromId(id);
204
+ const asixx = 'category' in patternDef ? mapCategoryToASIXX(category) : getASIXXFromId(id);
168
205
  for (let i = 0; i < lines.length; i++) {
169
- if (pattern.test(lines[i])) {
206
+ if (regex.test(lines[i])) {
170
207
  findings.push({
171
208
  id,
172
- category: getCategoryFromId(id),
173
- asixx: getASIXXFromId(id),
209
+ category: category,
210
+ asixx,
174
211
  severity: severity,
175
212
  file,
176
213
  line: i + 1,
@@ -182,6 +219,18 @@ function scanContent(content, file, patterns) {
182
219
  }
183
220
  return findings;
184
221
  }
222
/**
 * Translate an external-rules category key into its OWASP Agentic Security
 * (ASIxx) threat identifier. Unrecognized categories fall back to ASI04
 * (secrets / supply chain).
 *
 * @param {string} category Category key from the rules file.
 * @returns {string} ASIxx identifier.
 */
function mapCategoryToASIXX(category) {
    const categoryToAsixx = new Map([
        ["promptInjection", "ASI01"],
        ["credentialLeaks", "ASI04"],
        ["shellInjection", "ASI05"],
        ["exfiltration", "ASI02"],
        ["secrets", "ASI04"],
        ["toolMisuse", "ASI02"],
        ["behavioral", "ASI09"],
    ]);
    return categoryToAsixx.get(category) ?? "ASI04";
}
185
234
  function scanCodeBlocksInMarkdown(content, file) {
186
235
  const findings = [];
187
236
  const codeBlockRegex = /```(\w+)?\n([\s\S]*?)```/g;
@@ -283,6 +332,9 @@ export function auditSecurity(skill, manifest) {
283
332
  unreadableFiles: []
284
333
  };
285
334
  }
335
+ // Initialize patterns (load from file or use hardcoded fallback)
336
+ const patterns = initPatterns();
337
+ const hasExternalPatterns = patterns.size > 0;
286
338
  const files = getSkillFiles(resolvedPath);
287
339
  const findings = [];
288
340
  const unreadableFiles = [];
@@ -290,20 +342,49 @@ export function auditSecurity(skill, manifest) {
290
342
  const filename = basename(file);
291
343
  try {
292
344
  const content = readFileSync(file, "utf-8");
293
- if (filename === "SKILL.md" || filename === "AGENTS.md") {
294
- findings.push(...scanContent(content, file, PROMPT_INJECTION_PATTERNS));
295
- findings.push(...scanContent(content, file, CREDENTIAL_PATTERNS_MD));
296
- findings.push(...scanContent(content, file, EXFILTRATION_PATTERNS));
297
- findings.push(...scanContent(content, file, BEHAVIORAL_PATTERNS));
298
- findings.push(...scanContent(content, file, DANGEROUS_PATTERNS));
345
+ if (filename === "SKILL.md" || filename === "SKILL.md") {
346
+ // Use external patterns if available, otherwise use hardcoded
347
+ if (hasExternalPatterns) {
348
+ const piPatterns = patterns.get("promptInjection") || [];
349
+ const clPatterns = patterns.get("credentialLeaks") || [];
350
+ const exPatterns = patterns.get("exfiltration") || [];
351
+ const bmPatterns = patterns.get("behavioral") || [];
352
+ const cePatterns = patterns.get("shellInjection") || [];
353
+ findings.push(...scanContent(content, file, piPatterns));
354
+ findings.push(...scanContent(content, file, clPatterns));
355
+ findings.push(...scanContent(content, file, exPatterns));
356
+ findings.push(...scanContent(content, file, bmPatterns));
357
+ findings.push(...scanContent(content, file, cePatterns));
358
+ }
359
+ else {
360
+ findings.push(...scanContent(content, file, PROMPT_INJECTION_PATTERNS));
361
+ findings.push(...scanContent(content, file, CREDENTIAL_PATTERNS_MD));
362
+ findings.push(...scanContent(content, file, EXFILTRATION_PATTERNS));
363
+ findings.push(...scanContent(content, file, BEHAVIORAL_PATTERNS));
364
+ findings.push(...scanContent(content, file, DANGEROUS_PATTERNS));
365
+ }
299
366
  findings.push(...scanCodeBlocksInMarkdown(content, file));
300
367
  }
301
368
  else if (isCodeFile(file)) {
302
- findings.push(...scanContent(content, file, CREDENTIAL_PATTERNS_CODE));
303
- findings.push(...scanContent(content, file, EXFILTRATION_PATTERNS));
304
- findings.push(...scanContent(content, file, DANGEROUS_PATTERNS));
305
- findings.push(...scanContent(content, file, SECRET_PATTERNS));
306
- findings.push(...scanContent(content, file, TOOL_MISUSE_PATTERNS));
369
+ if (hasExternalPatterns) {
370
+ const clPatterns = patterns.get("credentialLeaks") || [];
371
+ const exPatterns = patterns.get("exfiltration") || [];
372
+ const cePatterns = patterns.get("shellInjection") || [];
373
+ const scPatterns = patterns.get("secrets") || [];
374
+ const tmPatterns = patterns.get("toolMisuse") || [];
375
+ findings.push(...scanContent(content, file, clPatterns));
376
+ findings.push(...scanContent(content, file, exPatterns));
377
+ findings.push(...scanContent(content, file, cePatterns));
378
+ findings.push(...scanContent(content, file, scPatterns));
379
+ findings.push(...scanContent(content, file, tmPatterns));
380
+ }
381
+ else {
382
+ findings.push(...scanContent(content, file, CREDENTIAL_PATTERNS_CODE));
383
+ findings.push(...scanContent(content, file, EXFILTRATION_PATTERNS));
384
+ findings.push(...scanContent(content, file, DANGEROUS_PATTERNS));
385
+ findings.push(...scanContent(content, file, SECRET_PATTERNS));
386
+ findings.push(...scanContent(content, file, TOOL_MISUSE_PATTERNS));
387
+ }
307
388
  }
308
389
  }
309
390
  catch (e) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@hungpg/skill-audit",
3
- "version": "0.1.1",
3
+ "version": "0.2.0",
4
4
  "description": "Security auditing CLI for AI agent skills",
5
5
  "type": "module",
6
6
  "bin": {