muaddib-scanner 2.10.49 → 2.10.51

package/README.md CHANGED
@@ -282,6 +282,8 @@ repos:
 
 | Metric | Result | Details |
 |--------|--------|---------|
+ | **ML FPR** | **2.85%** (239/8,393 holdout) | XGBoost retrained on 56,564 samples, 64 features, threshold=0.710 |
+ | **ML TPR** | **99.93%** (2,918/2,920 holdout) | 377 confirmed_malicious via OSSF/GHSA/npm correlation |
 | **Wild TPR** (Datadog 17K) | **92.8%** (13,538/14,587 in-scope) | 17,922 packages. 3,335 skipped (no JS). By category: compromised_lib 97.8%, malicious_intent 92.1% |
 | **TPR** (Ground Truth) | **93.9%** (46/49) | 51 real attacks. 3 out-of-scope: browser-only |
 | **FPR** (Benign curated) | **10.6%** (56/529) | 529 npm packages, real source via `npm pack` |
@@ -290,7 +292,13 @@ repos:
 
 **3,034 tests** across 65 files. **200 rules** (195 RULES + 5 PARANOID).
 
- > **Methodology caveats:**
+ > **ML retrain methodology (v2.10.51):**
+ > - Ground truth: 377 confirmed_malicious via auto-labeler (OSSF malicious-packages, GitHub Advisory Database, npm registry takedown correlation)
+ > - Dataset: 56,564 samples (14,602 malicious, 41,962 clean). Stratified 80/20 split
+ > - Grid search: depth=4, estimators=300, lr=0.05. AUC-ROC=0.999, F1=0.960
+ > - Leaky feature filter: 23 dead/leaky features removed (source-identity proxies)
+ >
+ > **Static evaluation caveats:**
 > - TPR measured on 49 Node.js attack samples (3 browser-only excluded from 51 total)
 > - FPR measured on 529 curated popular npm packages (not a random sample)
 > - ADR measured with global threshold (score >= 20) as of v2.6.5
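
The retrain recipe in the methodology note is compact enough to sketch end to end. The following is an illustrative reconstruction, not the package's actual training code: the `features.npz` file and its layout are assumptions, while the split, hyperparameters, and the 0.710 decision threshold come straight from the note above.

```python
# Hedged reconstruction of the v2.10.51 retrain described above.
# ASSUMPTION: features are available as a 56,564 x 64 matrix in features.npz;
# the real pipeline's feature extraction is not part of this sketch.
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, f1_score
from xgboost import XGBClassifier

data = np.load("features.npz")
X, y = data["X"], data["y"]          # y: 1 = malicious, 0 = clean

# Stratified 80/20 split, per the methodology note
X_tr, X_te, y_tr, y_te = train_test_split(
    X, y, test_size=0.20, stratify=y, random_state=42)

# Grid-search winners from the note: depth=4, estimators=300, lr=0.05
clf = XGBClassifier(max_depth=4, n_estimators=300, learning_rate=0.05,
                    eval_metric="logloss")
clf.fit(X_tr, y_tr)

proba = clf.predict_proba(X_te)[:, 1]
pred = (proba >= 0.710).astype(int)  # decision threshold from the table
print(f"AUC-ROC: {roc_auc_score(y_te, proba):.3f}")
print(f"F1:      {f1_score(y_te, pred):.3f}")
```

At that threshold the holdout numbers in the table are self-consistent: 239/8,393 is approximately 2.85% FPR and 2,918/2,920 is approximately 99.93% TPR. The confirmed_malicious ground-truth labels themselves are produced by the new auto-labeling pipeline added below (auto_labeler.py and ghsa_checker.py).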
@@ -0,0 +1,312 @@
+ #!/usr/bin/env python3
+ """
+ MUAD'DIB Auto-Labeling Pipeline
+
+ Correlates muaddib suspects with external signals (OSSF, GHSA, npm status)
+ to produce ground truth labels for ML training.
+
+ Usage:
+     python auto_labeler.py --full        # Run all steps
+     python auto_labeler.py --step ossf   # Run individual step
+     python auto_labeler.py --step npm
+     python auto_labeler.py --step ghsa
+     python auto_labeler.py --step label
+     python auto_labeler.py --update      # Cron mode: re-check pending/unconfirmed
+
+ Environment:
+     GITHUB_TOKEN    Optional, for higher GHSA API rate limits
+     MUADDIB_DATA    Override data directory (default: /opt/muaddib/data)
+ """
+
+ import argparse
+ import json
+ import logging
+ import os
+ import sys
+ from datetime import datetime, timezone
+ from pathlib import Path
+
+ import ossf_index
+ import ghsa_checker
+ import npm_checker
+ import labeler
+
+ # ── Paths ──
+ MUADDIB_DATA = Path(os.environ.get("MUADDIB_DATA", "/opt/muaddib/data"))
+ MUADDIB_ALERTS = Path(os.environ.get("MUADDIB_ALERTS", "/opt/muaddib/logs/alerts"))
+ BASE_DIR = Path(__file__).parent
+ CACHE_DIR = BASE_DIR / "data"
+ OSSF_REPO_DIR = CACHE_DIR / "ossf-malicious-packages"
+ OUTPUT_PATH = MUADDIB_DATA / "auto-labels.json"
+
+ log = logging.getLogger("auto-labeler")
+
+
+ def setup_logging(verbose=False):
+     level = logging.DEBUG if verbose else logging.INFO
+     fmt = "%(asctime)s [%(name)s] %(levelname)s %(message)s"
+     logging.basicConfig(level=level, format=fmt, datefmt="%Y-%m-%d %H:%M:%S")
+
+
+ def load_detections():
+     """Load detections.json from the muaddib data directory."""
+     path = MUADDIB_DATA / "detections.json"
+     if not path.is_file():
+         log.error("detections.json not found at %s", path)
+         sys.exit(1)
+
+     with open(path, "r", encoding="utf-8") as f:
+         data = json.load(f)
+
+     detections = data.get("detections", [])
+     npm_count = sum(1 for d in detections if d.get("ecosystem") == "npm")
+     log.info("Loaded %d detections (%d npm)", len(detections), npm_count)
+     return detections
+
+
+ def load_alert_scores():
+     """Load risk scores and tiers from individual alert files.
+
+     Scans logs/alerts/ for JSON files and extracts score + tier info.
+     Returns a dict keyed by "name@version".
+     """
+     scores = {}
+
+     # Try cached scores first
+     cache_path = CACHE_DIR / "alert-scores-cache.json"
+     if cache_path.is_file():
+         try:
+             with open(cache_path, "r", encoding="utf-8") as f:
+                 cached = json.load(f)
+             if cached.get("count", 0) > 0:
+                 log.info("Loaded %d cached alert scores", cached["count"])
+                 return cached.get("scores", {})
+         except (json.JSONDecodeError, OSError):
+             pass
+
+     if not MUADDIB_ALERTS.is_dir():
+         log.warning("Alerts directory not found at %s — scores will be estimated from severity",
+                     MUADDIB_ALERTS)
+         return scores
+
+     alert_files = list(MUADDIB_ALERTS.glob("*.json"))
+     log.info("Scanning %d alert files for scores...", len(alert_files))
+
+     for filepath in alert_files:
+         try:
+             with open(filepath, "r", encoding="utf-8") as f:
+                 alert = json.load(f)
+
+             target = alert.get("target", "")
+             summary = alert.get("summary", {})
+             score = summary.get("riskScore", summary.get("globalRiskScore", 0))
+
+             # Parse target: "npm/package-name@version" or "pypi/package@version"
+             if "/" in target and "@" in target:
+                 eco_pkg = target.split("/", 1)
+                 if len(eco_pkg) == 2:
+                     pkg_ver = eco_pkg[1]
+                     # Determine tier from priority
+                     priority = alert.get("priority", {})
+                     tier = ""
+                     p_level = priority.get("level", "")
+                     if p_level == "P1":
+                         tier = "T1a"
+                     elif p_level == "P2":
+                         tier = "T1b"
+                     elif p_level == "P3":
+                         tier = "T2"
+
+                     scores[pkg_ver] = {"score": score, "tier": tier}
+
+         except (json.JSONDecodeError, OSError):
+             continue
+
+     # Cache for next run
+     CACHE_DIR.mkdir(parents=True, exist_ok=True)
+     with open(cache_path, "w", encoding="utf-8") as f:
+         json.dump({"count": len(scores), "built_at": datetime.now(timezone.utc).isoformat(),
+                    "scores": scores}, f)
+
+     log.info("Extracted scores from %d alerts", len(scores))
+     return scores
+
+
+ # ── Steps ──
+
+ def step_ossf():
+     """Step 1: Index OSSF malicious-packages."""
+     log.info("=== Step 1: OSSF Index ===")
+     ossf_index.clone_or_update(OSSF_REPO_DIR)
+     index = ossf_index.build_index(OSSF_REPO_DIR, CACHE_DIR)
+     return index
+
+
+ def step_ghsa():
+     """Step 3: Index GitHub Advisory Database."""
+     log.info("=== Step 3: GHSA Index ===")
+     # Try cache first
+     index = ghsa_checker.load_cached_index(CACHE_DIR)
+     if index is not None:
+         return index
+     return ghsa_checker.build_index(CACHE_DIR)
+
+
+ def step_npm(detections):
+     """Step 2: Check npm status for suspects."""
+     log.info("=== Step 2: npm Status Check ===")
+     return npm_checker.check_suspects(detections, CACHE_DIR)
+
+
+ def step_label(detections, o_index, g_index, npm_status, alert_scores):
+     """Step 4: Generate labels."""
+     log.info("=== Step 4: Generate Labels ===")
+
+     labels = labeler.label_suspects(detections, o_index, g_index, npm_status, alert_scores)
+     missed = labeler.find_missed(o_index, g_index, detections)
+     summary = labeler.export_labels(labels, missed, OUTPUT_PATH)
+
+     return summary
+
+
+ # ── Modes ──
+
+ def run_full():
+     """Run all steps sequentially."""
+     log.info("Starting full auto-labeling pipeline")
+     start = datetime.now()
+
+     detections = load_detections()
+     alert_scores = load_alert_scores()
+
+     # Steps 1+3 don't depend on detections — could be parallel but keep it simple
+     o_index = step_ossf()
+     g_index = step_ghsa()
+     npm_status = step_npm(detections)
+
+     summary = step_label(detections, o_index, g_index, npm_status, alert_scores)
+
+     elapsed = (datetime.now() - start).total_seconds()
+     log.info("Pipeline complete in %.1fs — %s", elapsed, summary)
+     return summary
+
+
+ def run_update():
+     """Cron mode: re-check pending/unconfirmed labels against fresh external data."""
+     log.info("Starting update (cron mode)")
+
+     # Refresh external indices
+     ossf_index.clone_or_update(OSSF_REPO_DIR)
+     o_index = ossf_index.build_index(OSSF_REPO_DIR, CACHE_DIR)
+     g_index = ghsa_checker.build_index(CACHE_DIR)
+
+     # Load existing labels
+     if not OUTPUT_PATH.is_file():
+         log.error("No existing auto-labels.json — run --full first")
+         sys.exit(1)
+
+     with open(OUTPUT_PATH, "r", encoding="utf-8") as f:
+         existing = json.load(f)
+
+     existing_labels = existing.get("labels", {})
+     detections = load_detections()
+     alert_scores = load_alert_scores()
+
+     # Find labels that need re-evaluation
+     to_recheck = []
+     for key, entry in existing_labels.items():
+         lbl = entry.get("auto_label")
+         if lbl in ("pending", "unconfirmed", "likely_malicious"):
+             # Re-extract detection info
+             for det in detections:
+                 if f"{det['package']}@{det['version']}" == key:
+                     to_recheck.append(det)
+                     break
+
+     if not to_recheck:
+         log.info("No pending/unconfirmed labels to re-check")
+         return
+
+     log.info("Re-checking %d labels (pending/unconfirmed/likely_malicious)", len(to_recheck))
+
+     # Re-check npm status for these specific packages
+     npm_status = npm_checker.check_suspects(to_recheck, CACHE_DIR)
+
+     # Re-label
+     updated = labeler.label_suspects(to_recheck, o_index, g_index, npm_status, alert_scores)
+
+     # Merge updates into existing labels
+     changes = 0
+     for key, new_entry in updated.items():
+         old = existing_labels.get(key, {})
+         if old.get("auto_label") != new_entry.get("auto_label"):
+             log.info("RELABEL: %s — %s → %s",
+                      key, old.get("auto_label"), new_entry.get("auto_label"))
+             changes += 1
+         existing_labels[key] = new_entry
+
+     # Also refresh missed detections
+     missed = labeler.find_missed(o_index, g_index, detections)
+     for name, info in missed.items():
+         mk = f"{name}@*"
+         if mk not in existing_labels:
+             existing_labels[mk] = info
+             changes += 1
+
+     # Re-export
+     labeler.export_labels(
+         {k: v for k, v in existing_labels.items() if v.get("auto_label") != "missed"},
+         {k.replace("@*", ""): v for k, v in existing_labels.items() if v.get("auto_label") == "missed"},
+         OUTPUT_PATH,
+     )
+
+     log.info("Update complete: %d labels changed", changes)
+
+
+ def main():
+     parser = argparse.ArgumentParser(description="MUAD'DIB Auto-Labeling Pipeline")
+     group = parser.add_mutually_exclusive_group(required=True)
+     group.add_argument("--full", action="store_true", help="Run all steps")
+     group.add_argument("--step", choices=["ossf", "ghsa", "npm", "label"],
+                        help="Run individual step")
+     group.add_argument("--update", action="store_true",
+                        help="Cron: re-check pending/unconfirmed")
+     parser.add_argument("-v", "--verbose", action="store_true", help="Debug logging")
+     parser.add_argument("--data-dir", help="Override MUADDIB_DATA path")
+     parser.add_argument("--alerts-dir", help="Override MUADDIB_ALERTS path")
+     args = parser.parse_args()
+
+     setup_logging(args.verbose)
+
+     if args.data_dir:
+         global MUADDIB_DATA, OUTPUT_PATH
+         MUADDIB_DATA = Path(args.data_dir)
+         OUTPUT_PATH = MUADDIB_DATA / "auto-labels.json"
+     if args.alerts_dir:
+         global MUADDIB_ALERTS
+         MUADDIB_ALERTS = Path(args.alerts_dir)
+
+     if args.full:
+         run_full()
+     elif args.update:
+         run_update()
+     elif args.step == "ossf":
+         step_ossf()
+     elif args.step == "ghsa":
+         step_ghsa()
+     elif args.step == "npm":
+         step_npm(load_detections())
+     elif args.step == "label":
+         detections = load_detections()
+         alert_scores = load_alert_scores()
+         o_index = ossf_index.load_cached_index(CACHE_DIR)
+         g_index = ghsa_checker.load_cached_index(CACHE_DIR)
+         npm_status = npm_checker._load_cache(CACHE_DIR / npm_checker.CACHE_FILENAME)
+         if o_index is None or g_index is None:
+             log.error("Run --step ossf and --step ghsa first (or use --full)")
+             sys.exit(1)
+         step_label(detections, o_index, g_index, npm_status, alert_scores)
+
+
+ if __name__ == "__main__":
+     main()
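
The `--update` mode above is explicitly built for cron. A minimal crontab sketch, assuming a deployment under `/opt/muaddib`; the paths and cadence here are illustrative, not shipped defaults:

```cron
# Illustrative schedule (assumed paths and cadence):
# re-check pending/unconfirmed labels every 6 hours, full rebuild weekly
0 */6 * * *  cd /opt/muaddib/auto-labeler && python3 auto_labeler.py --update >> /opt/muaddib/logs/auto-labeler.log 2>&1
30 3 * * 0   cd /opt/muaddib/auto-labeler && python3 auto_labeler.py --full >> /opt/muaddib/logs/auto-labeler.log 2>&1
```

Setting GITHUB_TOKEN in the job's environment raises the GHSA API rate limit, as documented in the module docstring.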
@@ -0,0 +1,169 @@
+ """
+ GitHub Advisory Database checker.
+
+ Fetches all npm malware advisories from the GitHub Advisory Database API.
+ Supports an optional GITHUB_TOKEN env var for higher rate limits.
+ """
+
+ import json
+ import logging
+ import os
+ import time
+ from datetime import datetime, timezone
+ from pathlib import Path
+
+ import requests
+
+ log = logging.getLogger("auto-labeler.ghsa")
+
+ GHSA_API = "https://api.github.com/advisories"
+ INDEX_FILENAME = "ghsa-index.json"
+ # Cache validity: 12 hours
+ CACHE_TTL_SECONDS = 12 * 3600
+
+
+ def _get_headers():
+     token = os.environ.get("GITHUB_TOKEN")
+     headers = {"Accept": "application/vnd.github+json"}
+     if token:
+         headers["Authorization"] = f"Bearer {token}"
+         log.info("Using GITHUB_TOKEN for GHSA API (5000 req/h)")
+     else:
+         log.info("No GITHUB_TOKEN — GHSA API limited to 60 req/h")
+     return headers
+
+
+ def fetch_malware_advisories():
+     """Fetch all npm malware advisories from GHSA. Returns a list of advisories."""
+     headers = _get_headers()
+     advisories = []
+     page = 1
+     per_page = 100
+
+     while True:
+         params = {
+             "type": "malware",
+             "ecosystem": "npm",
+             "per_page": per_page,
+             "page": page,
+         }
+
+         for attempt in range(3):
+             try:
+                 resp = requests.get(GHSA_API, headers=headers, params=params, timeout=30)
+
+                 if resp.status_code == 403:
+                     # Rate limited
+                     retry_after = int(resp.headers.get("Retry-After", 60))
+                     log.warning("GHSA rate limited, waiting %ds", retry_after)
+                     time.sleep(retry_after)
+                     continue
+
+                 resp.raise_for_status()
+                 break
+             except requests.RequestException as e:
+                 wait = 2 ** attempt * 5
+                 log.warning("GHSA request failed (attempt %d): %s — retrying in %ds",
+                             attempt + 1, e, wait)
+                 time.sleep(wait)
+         else:
+             # for/else: runs only when all 3 attempts finished without a break
+             log.error("GHSA fetch failed after 3 attempts on page %d", page)
+             break
+
+         batch = resp.json()
+         if not batch:
+             break
+
+         advisories.extend(batch)
+         log.info("GHSA page %d: %d advisories (total: %d)", page, len(batch), len(advisories))
+
+         if len(batch) < per_page:
+             break
+         page += 1
+         time.sleep(1)  # Courtesy delay
+
+     return advisories
+
+
+ def build_index(cache_dir):
+     """Build the GHSA index from the API. Returns a dict keyed by package name."""
+     advisories = fetch_malware_advisories()
+     index = {}
+
+     for adv in advisories:
+         ghsa_id = adv.get("ghsa_id", "")
+         published = adv.get("published_at", "")
+         summary = adv.get("summary", "")
+         withdrawn = adv.get("withdrawn_at")
+
+         # Skip withdrawn advisories
+         if withdrawn:
+             continue
+
+         for vuln in adv.get("vulnerabilities", []):
+             pkg = vuln.get("package", {})
+             ecosystem = pkg.get("ecosystem", "").lower()
+             name = pkg.get("name", "")
+
+             if ecosystem != "npm" or not name:
+                 continue
+
+             version_range = vuln.get("vulnerable_version_range", "")
+
+             entry = {
+                 "source": "ghsa",
+                 "ghsa_id": ghsa_id,
+                 "date": published,
+                 "summary": summary[:200],
+                 "version_range": version_range,
+             }
+
+             # Index by package name (version matching is approximate for GHSA)
+             if name not in index:
+                 index[name] = []
+             index[name].append(entry)
+
+     log.info("GHSA index: %d packages from %d advisories", len(index), len(advisories))
+
+     # Cache to disk
+     cache_dir = Path(cache_dir)
+     cache_dir.mkdir(parents=True, exist_ok=True)
+     cache_path = cache_dir / INDEX_FILENAME
+     with open(cache_path, "w", encoding="utf-8") as f:
+         json.dump({"built_at": datetime.now(timezone.utc).isoformat(),
+                    "count": len(index),
+                    "index": index}, f)
+     log.info("GHSA index cached to %s", cache_path)
+
+     return index
+
+
+ def load_cached_index(cache_dir):
+     """Load the index from cache if it is fresh enough."""
+     cache_path = Path(cache_dir) / INDEX_FILENAME
+     if not cache_path.is_file():
+         return None
+     try:
+         stat = cache_path.stat()
+         age = time.time() - stat.st_mtime
+         if age > CACHE_TTL_SECONDS:
+             log.info("GHSA cache expired (%.1fh old)", age / 3600)
+             return None
+
+         with open(cache_path, "r", encoding="utf-8") as f:
+             data = json.load(f)
+         log.info("Loaded cached GHSA index (%d packages, built %s)",
+                  data.get("count", 0), data.get("built_at", "?"))
+         return data.get("index", {})
+     except (json.JSONDecodeError, OSError) as e:
+         log.warning("Failed to load GHSA cache: %s", e)
+         return None
+
+
+ def lookup(index, name):
+     """Check whether a package name is in the GHSA index.
+
+     Returns the list of advisory entries, or None if the package is absent.
+     """
+     entries = index.get(name)
+     return entries if entries else None
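
For reference, the three public entry points above compose into the cache-or-build pattern that auto_labeler.py uses. A minimal consumer sketch, where only the queried package name is a placeholder:

```python
# Cache-or-build usage of ghsa_checker; "left-pad" is a placeholder name.
from pathlib import Path

import ghsa_checker

cache_dir = Path("./data")
index = ghsa_checker.load_cached_index(cache_dir)  # None if missing or >12h old
if index is None:
    index = ghsa_checker.build_index(cache_dir)    # fetches from the GHSA API

hits = ghsa_checker.lookup(index, "left-pad")
if hits:
    for entry in hits:
        print(entry["ghsa_id"], entry["date"], entry["version_range"])
```

The 12-hour TTL means a cron cadence faster than twice daily will mostly hit the on-disk index rather than the API.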