devguard 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60) hide show
  1. devguard/INTEGRATION_SUMMARY.md +121 -0
  2. devguard/__init__.py +3 -0
  3. devguard/__main__.py +6 -0
  4. devguard/checkers/__init__.py +41 -0
  5. devguard/checkers/api_usage.py +523 -0
  6. devguard/checkers/aws_cost.py +331 -0
  7. devguard/checkers/aws_iam.py +284 -0
  8. devguard/checkers/base.py +25 -0
  9. devguard/checkers/container.py +137 -0
  10. devguard/checkers/domain.py +189 -0
  11. devguard/checkers/firecrawl.py +117 -0
  12. devguard/checkers/fly.py +225 -0
  13. devguard/checkers/github.py +210 -0
  14. devguard/checkers/npm.py +327 -0
  15. devguard/checkers/npm_security.py +244 -0
  16. devguard/checkers/redteam.py +290 -0
  17. devguard/checkers/secret.py +279 -0
  18. devguard/checkers/swarm.py +376 -0
  19. devguard/checkers/tailscale.py +143 -0
  20. devguard/checkers/tailsnitch.py +303 -0
  21. devguard/checkers/tavily.py +179 -0
  22. devguard/checkers/vercel.py +192 -0
  23. devguard/cli.py +1510 -0
  24. devguard/cli_helpers.py +189 -0
  25. devguard/config.py +249 -0
  26. devguard/core.py +293 -0
  27. devguard/dashboard.py +715 -0
  28. devguard/discovery.py +363 -0
  29. devguard/http_client.py +142 -0
  30. devguard/llm_service.py +481 -0
  31. devguard/mcp_server.py +259 -0
  32. devguard/metrics.py +144 -0
  33. devguard/models.py +208 -0
  34. devguard/reporting.py +1571 -0
  35. devguard/sarif.py +295 -0
  36. devguard/scripts/ANALYSIS_SUMMARY.md +141 -0
  37. devguard/scripts/README.md +221 -0
  38. devguard/scripts/auto_fix_recommendations.py +145 -0
  39. devguard/scripts/generate_npmignore.py +175 -0
  40. devguard/scripts/generate_security_report.py +324 -0
  41. devguard/scripts/prepublish_check.sh +29 -0
  42. devguard/scripts/redteam_npm_packages.py +1262 -0
  43. devguard/scripts/review_all_repos.py +300 -0
  44. devguard/spec.py +617 -0
  45. devguard/sweeps/__init__.py +23 -0
  46. devguard/sweeps/ai_editor_config_audit.py +697 -0
  47. devguard/sweeps/cargo_publish_audit.py +655 -0
  48. devguard/sweeps/dependency_audit.py +419 -0
  49. devguard/sweeps/gitignore_audit.py +336 -0
  50. devguard/sweeps/local_dev.py +260 -0
  51. devguard/sweeps/local_dirty_worktree_secrets.py +521 -0
  52. devguard/sweeps/project_flaudit.py +636 -0
  53. devguard/sweeps/public_github_secrets.py +680 -0
  54. devguard/sweeps/publish_audit.py +478 -0
  55. devguard/sweeps/ssh_key_audit.py +327 -0
  56. devguard/utils.py +174 -0
  57. devguard-0.2.0.dist-info/METADATA +225 -0
  58. devguard-0.2.0.dist-info/RECORD +60 -0
  59. devguard-0.2.0.dist-info/WHEEL +4 -0
  60. devguard-0.2.0.dist-info/entry_points.txt +2 -0
@@ -0,0 +1,697 @@
1
+ """AI editor config audit: validate Claude, Cursor, Copilot, MCP, and cross-tool configs.
2
+
3
+ Scans git repos under a dev root for AI coding tool configurations and checks:
4
+ - CLAUDE.md presence, case correctness, basic validity
5
+ - .claude/ directory (settings.json, rules format, settings.local.json not tracked)
6
+ - Cursor rules (.cursor/rules/*.mdc well-formed, .cursorrules)
7
+ - Copilot config (.github/copilot-instructions.md)
8
+ - MCP configs (.mcp.json, .claude.json -- valid JSON, no hardcoded secrets)
9
+ - Cross-tool consistency (same rules expressed across Claude/Cursor)
10
+ - Generated file freshness (provenance headers from dotfiles sync)
11
+ """
12
+
13
+ from __future__ import annotations
14
+
15
+ import fnmatch
16
+ import json
17
+ import os
18
+ import re
19
+ import subprocess
20
+ from dataclasses import dataclass, field
21
+ from datetime import UTC, datetime
22
+ from pathlib import Path
23
+ from typing import Any
24
+
25
+
26
+ def _utc_now() -> str:
27
+ return datetime.now(UTC).isoformat().replace("+00:00", "Z")
28
+
29
+
30
+ def _default_dev_root() -> Path:
31
+ return Path(os.getenv("DEV_DIR") or "~/Documents/dev").expanduser()
32
+
33
+
34
+ def _iter_git_repos(root: Path, max_depth: int, exclude_globs: list[str]) -> list[Path]:
35
+ """Discover git repos under root."""
36
+ root = root.resolve()
37
+ max_depth = max(0, min(int(max_depth), 6))
38
+ junk = {
39
+ "node_modules",
40
+ ".venv",
41
+ "venv",
42
+ "dist",
43
+ "build",
44
+ ".git",
45
+ ".cache",
46
+ ".state",
47
+ "__pycache__",
48
+ "_trash",
49
+ "_scratch",
50
+ "_external",
51
+ "_archive",
52
+ "_forks",
53
+ "target",
54
+ }
55
+ repos: list[Path] = []
56
+ stack: list[tuple[Path, int]] = [(root, 0)]
57
+ seen: set[Path] = set()
58
+ while stack:
59
+ cur, depth = stack.pop()
60
+ if cur in seen:
61
+ continue
62
+ seen.add(cur)
63
+ if (cur / ".git").exists():
64
+ if not any(fnmatch.fnmatch(str(cur), g) for g in exclude_globs):
65
+ repos.append(cur)
66
+ continue
67
+ if depth >= max_depth:
68
+ continue
69
+ try:
70
+ for child in cur.iterdir():
71
+ if not child.is_dir():
72
+ continue
73
+ name = child.name
74
+ if name in junk or name.startswith("."):
75
+ continue
76
+ stack.append((child, depth + 1))
77
+ except Exception:
78
+ continue
79
+ return sorted(repos)
80
+
81
+
82
+ def _is_likely_public(repo: Path) -> bool:
83
+ for name in ("LICENSE", "LICENSE.md", "LICENSE.txt", "LICENCE"):
84
+ if (repo / name).exists():
85
+ return True
86
+ return False
87
+
88
+
89
+ def _is_tracked_by_git(repo: Path, rel_path: str) -> bool:
90
+ """Check if a file is tracked by git."""
91
+ try:
92
+ res = subprocess.run(
93
+ ["git", "ls-files", rel_path],
94
+ cwd=str(repo),
95
+ capture_output=True,
96
+ text=True,
97
+ timeout=5,
98
+ )
99
+ return bool(res.stdout.strip())
100
+ except Exception:
101
+ return False
102
+
103
+
104
+ def _git_tracks_case(repo: Path, expected: str) -> str | None:
105
+ """Check what case git actually tracks for a file. Returns tracked name or None."""
106
+ try:
107
+ res = subprocess.run(
108
+ ["git", "ls-files", expected],
109
+ cwd=str(repo),
110
+ capture_output=True,
111
+ text=True,
112
+ timeout=5,
113
+ )
114
+ tracked = res.stdout.strip()
115
+ if tracked:
116
+ return tracked
117
+ # Try lowercase variant
118
+ res2 = subprocess.run(
119
+ ["git", "ls-files", expected.lower()],
120
+ cwd=str(repo),
121
+ capture_output=True,
122
+ text=True,
123
+ timeout=5,
124
+ )
125
+ tracked2 = res2.stdout.strip()
126
+ return tracked2 if tracked2 else None
127
+ except Exception:
128
+ return None
129
+
130
+
131
# Patterns that look like hardcoded secrets in JSON configs. Each pattern's
# group(1) captures the candidate secret value for further filtering.
_SECRET_PATTERNS = [
    # Secret-ish key names ("api_key", "token", ...) whose value is 20+
    # alphanumeric/underscore/dash chars (env var refs are filtered later).
    re.compile(
        r'"(?:api[_-]?key|token|secret|password|credential)"\s*:\s*"([a-zA-Z0-9_\-]{20,})"',
        re.IGNORECASE,
    ),
    # Any key whose value is a bare 32+ char hex string (hashes, tokens).
    re.compile(r'"[^"]*"\s*:\s*"([0-9a-f]{32,})"', re.IGNORECASE),
]

# Env var reference patterns -- values matching these are NOT secrets.
_ENV_REF_PATTERNS = [
    re.compile(r"\$\{[A-Z_]+\}"),  # ${VAR}
    re.compile(r"\$\{env:[A-Z_]+\}"),  # ${env:VAR} (windsurf)
    re.compile(r"\$[A-Z_]+"),  # $VAR (gemini)
]

# Header marker the dotfiles sync tooling writes into generated rule files.
_PROVENANCE_MARKER = "DO NOT EDIT -- generated by"
150
+
151
+
152
@dataclass
class Finding:
    """A single issue detected by one audit check in one repo."""

    check: str  # machine-readable check id, e.g. "claude_md_case"
    severity: str  # "error", "warning", "info"
    message: str  # human-readable one-line summary
    detail: str = ""  # optional remediation / context text
158
+
159
+
160
@dataclass
class RepoAuditResult:
    """Aggregated audit outcome for a single repository."""

    repo_path: str  # absolute path to the repo
    repo_name: str  # repo directory basename
    is_public: bool  # heuristic: a LICENSE file exists at the repo root
    has_claude_md: bool = False  # CLAUDE.md (or .claude/CLAUDE.md) present
    has_claude_dir: bool = False  # .claude/ directory present
    has_cursor_rules: bool = False  # .cursor/rules/ or .cursorrules present
    has_copilot_config: bool = False  # .github/copilot-instructions.md present
    has_mcp_config: bool = False  # .mcp.json, .claude.json, or .cursor/mcp.json present
    ai_tools_detected: list[str] = field(default_factory=list)  # e.g. ["claude", "cursor"]
    findings: list[Finding] = field(default_factory=list)  # issues from all checks
172
+
173
+
174
def _check_claude_md(repo: Path, result: RepoAuditResult) -> None:
    """Check CLAUDE.md presence, git case correctness, and basic validity."""
    root_memory = repo / "CLAUDE.md"
    nested_memory = repo / ".claude" / "CLAUDE.md"

    # Wrong-case tracking works on macOS (case-insensitive) but not on Linux/CI.
    tracked_name = _git_tracks_case(repo, "CLAUDE.md")
    if tracked_name is not None and tracked_name != "CLAUDE.md":
        result.findings.append(
            Finding(
                check="claude_md_case",
                severity="error",
                message=f"Git tracks '{tracked_name}' but Claude Code expects 'CLAUDE.md'",
                detail="On case-insensitive filesystems (macOS), wrong case works locally but breaks on Linux/CI",
            )
        )

    if not root_memory.is_file():
        # Fall back to the nested location; no content checks are done there.
        if nested_memory.is_file():
            result.has_claude_md = True
            result.ai_tools_detected.append("claude")
        return

    result.has_claude_md = True
    result.ai_tools_detected.append("claude")
    try:
        body = root_memory.read_text(encoding="utf-8", errors="replace")
    except Exception:
        return
    if not body.strip():
        result.findings.append(
            Finding(
                check="claude_md_empty",
                severity="warning",
                message="CLAUDE.md exists but is empty",
            )
        )
    elif len(body) > 50_000:
        result.findings.append(
            Finding(
                check="claude_md_large",
                severity="info",
                message=f"CLAUDE.md is {len(body)} chars (may hit context limits)",
            )
        )
217
+
218
+
219
def _check_claude_dir(repo: Path, result: RepoAuditResult) -> None:
    """Validate the .claude/ directory: settings JSON, local-settings hygiene, rules."""
    claude_dir = repo / ".claude"
    if not claude_dir.is_dir():
        return
    result.has_claude_dir = True

    # settings.json must parse as JSON.
    settings_path = claude_dir / "settings.json"
    if settings_path.is_file():
        try:
            json.loads(settings_path.read_text(encoding="utf-8", errors="replace"))
        except json.JSONDecodeError as exc:
            result.findings.append(
                Finding(
                    check="claude_settings_invalid",
                    severity="error",
                    message=f".claude/settings.json is invalid JSON: {exc}",
                )
            )

    # settings.local.json is machine-local and must not be committed.
    local_path = claude_dir / "settings.local.json"
    if local_path.is_file() and _is_tracked_by_git(repo, ".claude/settings.local.json"):
        result.findings.append(
            Finding(
                check="claude_local_settings_tracked",
                severity="error",
                message=".claude/settings.local.json is tracked by git",
                detail="This file may contain machine-specific or work-specific secrets. "
                "Add to .gitignore: .claude/settings.local.json",
            )
        )

    # Lint each markdown rule file under .claude/rules/.
    rules_dir = claude_dir / "rules"
    if not rules_dir.is_dir():
        return
    for rule_file in sorted(rules_dir.iterdir()):
        if not rule_file.is_file() or rule_file.suffix != ".md":
            continue
        try:
            body = rule_file.read_text(encoding="utf-8", errors="replace")
        except Exception:
            continue
        # Opening "---" without a closing "---" means broken frontmatter.
        if body.startswith("---") and body.find("---", 3) == -1:
            result.findings.append(
                Finding(
                    check="claude_rule_bad_frontmatter",
                    severity="warning",
                    message=f".claude/rules/{rule_file.name}: unclosed frontmatter",
                )
            )
        if not body.strip():
            result.findings.append(
                Finding(
                    check="claude_rule_empty",
                    severity="warning",
                    message=f".claude/rules/{rule_file.name}: empty rule file",
                )
            )
284
+
285
+
286
def _check_cursor_rules(repo: Path, result: RepoAuditResult) -> None:
    """Validate Cursor config: .cursor/rules/ files and the legacy .cursorrules."""
    rules_dir = repo / ".cursor" / "rules"
    legacy_file = repo / ".cursorrules"
    rules_dir_present = rules_dir.is_dir()
    legacy_present = legacy_file.is_file()

    if rules_dir_present or legacy_present:
        result.has_cursor_rules = True
        if "cursor" not in result.ai_tools_detected:
            result.ai_tools_detected.append("cursor")

    # Having both the deprecated file and the new directory invites drift.
    if legacy_present and rules_dir_present:
        result.findings.append(
            Finding(
                check="cursor_duplicate_rules",
                severity="warning",
                message="Both .cursorrules and .cursor/rules/ exist",
                detail=".cursorrules is deprecated; migrate to .cursor/rules/",
            )
        )

    if not rules_dir_present:
        return

    for rule_file in sorted(rules_dir.iterdir()):
        if not rule_file.is_file() or rule_file.suffix not in (".mdc", ".md"):
            continue
        try:
            body = rule_file.read_text(encoding="utf-8", errors="replace")
        except Exception:
            continue

        # .mdc files are expected to carry YAML frontmatter with
        # `description:` and `alwaysApply:` keys.
        if rule_file.suffix == ".mdc":
            if not body.startswith("---"):
                result.findings.append(
                    Finding(
                        check="cursor_mdc_no_frontmatter",
                        severity="warning",
                        message=f".cursor/rules/{rule_file.name}: .mdc file missing frontmatter",
                        detail="Cursor .mdc rules need ---\\ndescription: ...\\nalwaysApply: true/false\\n---",
                    )
                )
            else:
                closing = body.find("---", 3)
                if closing == -1:
                    result.findings.append(
                        Finding(
                            check="cursor_mdc_bad_frontmatter",
                            severity="warning",
                            message=f".cursor/rules/{rule_file.name}: unclosed frontmatter",
                        )
                    )
                else:
                    frontmatter = body[3:closing].strip()
                    for label, check_id in (
                        ("description", "cursor_mdc_no_description"),
                        ("alwaysApply", "cursor_mdc_no_always_apply"),
                    ):
                        if f"{label}:" not in frontmatter:
                            result.findings.append(
                                Finding(
                                    check=check_id,
                                    severity="info",
                                    message=f".cursor/rules/{rule_file.name}: missing {label} in frontmatter",
                                )
                            )

        if not body.strip():
            result.findings.append(
                Finding(
                    check="cursor_rule_empty",
                    severity="warning",
                    message=f".cursor/rules/{rule_file.name}: empty rule file",
                )
            )
367
+
368
+
369
def _check_copilot_config(repo: Path, result: RepoAuditResult) -> None:
    """Check the GitHub Copilot repo-wide instructions file."""
    instructions = repo / ".github" / "copilot-instructions.md"
    if not instructions.is_file():
        return
    result.has_copilot_config = True
    if "copilot" not in result.ai_tools_detected:
        result.ai_tools_detected.append("copilot")
    try:
        body = instructions.read_text(encoding="utf-8", errors="replace")
    except Exception:
        return
    if not body.strip():
        result.findings.append(
            Finding(
                check="copilot_instructions_empty",
                severity="warning",
                message=".github/copilot-instructions.md exists but is empty",
            )
        )
388
+
389
+
390
def _check_mcp_configs(repo: Path, result: RepoAuditResult) -> None:
    """Check MCP server configurations for validity and secret hygiene.

    Inspects .mcp.json and .claude.json at the repo root (JSON validity,
    hardcoded-secret patterns, mcpServers entry shape) and .cursor/mcp.json
    (JSON validity only). All findings name the offending file.
    """
    mcp_files = [
        (".mcp.json", "mcp"),
        (".claude.json", "claude"),
    ]
    for filename, tool in mcp_files:
        mcp_path = repo / filename
        if not mcp_path.is_file():
            continue
        result.has_mcp_config = True
        if tool not in result.ai_tools_detected:
            result.ai_tools_detected.append(tool)

        try:
            text = mcp_path.read_text(encoding="utf-8", errors="replace")
            try:
                data = json.loads(text)
            except json.JSONDecodeError as e:
                # BUG FIX: message previously said "(unknown)" instead of the
                # filename (cf. the .cursor/mcp.json branch which names the file).
                result.findings.append(
                    Finding(
                        check="mcp_invalid_json",
                        severity="error",
                        message=f"{filename}: invalid JSON: {e}",
                    )
                )
                continue

            # Scan the raw text for literal secrets; env var references are OK.
            secret_found = False
            for pattern in _SECRET_PATTERNS:
                for match in pattern.finditer(text):
                    value = match.group(1)
                    # Skip if it looks like an env var reference
                    if any(p.search(value) for p in _ENV_REF_PATTERNS):
                        continue
                    # Skip common non-secret values
                    if value in ("true", "false", "null") or value.startswith("http"):
                        continue
                    result.findings.append(
                        Finding(
                            check="mcp_hardcoded_secret",
                            severity="error",
                            message=f"{filename}: possible hardcoded secret/token",
                            detail="Use ${VAR} env var references instead of literal values. "
                            "Keep secrets in machine-local settings, not tracked files.",
                        )
                    )
                    secret_found = True
                    break
                # BUG FIX: the old single `break` only exited the inner loop,
                # so a second pattern could add a duplicate finding despite the
                # "one finding per file is enough" intent.
                if secret_found:
                    break

            # Each mcpServers entry must be an object with an entrypoint.
            servers = data.get("mcpServers", {})
            for name, config in servers.items():
                if not isinstance(config, dict):
                    result.findings.append(
                        Finding(
                            check="mcp_server_not_object",
                            severity="error",
                            message=f"{filename}: server '{name}' config is not an object",
                        )
                    )
                    continue
                if "command" not in config and "url" not in config:
                    result.findings.append(
                        Finding(
                            check="mcp_server_no_entrypoint",
                            severity="warning",
                            message=f"{filename}: server '{name}' has no 'command' or 'url'",
                        )
                    )
        except Exception:
            # Best-effort: unreadable or structurally odd files are skipped.
            pass

    # Also check .cursor/mcp.json (JSON validity only).
    cursor_mcp = repo / ".cursor" / "mcp.json"
    if cursor_mcp.is_file():
        result.has_mcp_config = True
        try:
            json.loads(cursor_mcp.read_text(encoding="utf-8", errors="replace"))
        except json.JSONDecodeError as e:
            result.findings.append(
                Finding(
                    check="mcp_invalid_json",
                    severity="error",
                    message=f".cursor/mcp.json: invalid JSON: {e}",
                )
            )
477
+
478
+
479
def _check_generated_freshness(repo: Path, result: RepoAuditResult) -> None:
    """Check if generated rule files have provenance headers and might be stale.

    Currently a placeholder: files written by the dotfiles sync carry a
    provenance marker, but a real freshness comparison would need the
    dotfiles repo path, which is not available here. No findings are emitted.
    """
    rule_dirs = (repo / ".claude" / "rules", repo / ".cursor" / "rules")
    for rules_dir in rule_dirs:
        if not rules_dir.is_dir():
            continue
        for rule_file in sorted(rules_dir.iterdir()):
            if not rule_file.is_file():
                continue
            try:
                body = rule_file.read_text(encoding="utf-8", errors="replace")
            except Exception:
                continue
            if _PROVENANCE_MARKER in body:
                # Provenance present: nothing to flag (see docstring).
                pass
502
+
503
+
504
def _check_cross_tool_consistency(repo: Path, result: RepoAuditResult) -> None:
    """Flag rule-set drift when both Claude and Cursor rules directories exist."""
    claude_dir = repo / ".claude" / "rules"
    cursor_dir = repo / ".cursor" / "rules"
    if not (claude_dir.is_dir() and cursor_dir.is_dir()):
        return

    # Compare rule basenames (extensions stripped) across the two tools.
    claude_names = {
        f.stem for f in claude_dir.iterdir() if f.is_file() and f.suffix == ".md"
    }
    cursor_names = {
        f.stem
        for f in cursor_dir.iterdir()
        if f.is_file() and f.suffix in (".mdc", ".md")
    }

    # skill-triggers is Claude-specific by design; don't count it as drift.
    claude_only = (claude_names - cursor_names) - {"skill-triggers"}
    cursor_only = cursor_names - claude_names

    # Only flag when BOTH sides have unique rules -- a meaningful gap.
    if claude_only and cursor_only:
        result.findings.append(
            Finding(
                check="cross_tool_rule_drift",
                severity="info",
                message=f"Rule sets differ: claude-only={sorted(claude_only)}, cursor-only={sorted(cursor_only)}",
                detail="If a rule applies to both tools, sync via dotfiles rules.toml manifest",
            )
        )
542
+
543
+
544
def _check_gitignore_coverage(repo: Path, result: RepoAuditResult) -> None:
    """Check if AI config files that should be ignored are in .gitignore."""
    gitignore = repo / ".gitignore"
    if not gitignore.is_file():
        return
    try:
        raw = gitignore.read_text(encoding="utf-8", errors="replace")
    except Exception:
        return

    # Normalize entries: drop comments/blanks, strip leading/trailing slashes.
    entries: set[str] = set()
    for raw_line in raw.splitlines():
        entry = raw_line.strip()
        if entry and not entry.startswith("#"):
            entries.add(entry.lstrip("/").rstrip("/"))

    if not (repo / ".claude").is_dir():
        return

    # Either an exact/glob entry for the local settings file, or a broad
    # .claude entry, covers the file.
    covered = (
        ".claude/settings.local.json" in entries
        or ".claude/settings.local.*" in entries
        or ".claude" in entries
    )
    if covered:
        return

    if (repo / ".claude" / "settings.local.json").exists():
        result.findings.append(
            Finding(
                check="gitignore_missing_local_settings",
                severity="warning",
                message=".claude/settings.local.json exists but not in .gitignore",
            )
        )
574
+
575
+
576
def _audit_repo(repo: Path) -> RepoAuditResult:
    """Run every AI editor config check against a single repo."""
    outcome = RepoAuditResult(
        repo_path=str(repo),
        repo_name=repo.name,
        is_public=_is_likely_public(repo),
    )
    # Each check appends findings to `outcome` in place; order matters only
    # for the grouping of findings in the final report.
    checks = (
        _check_claude_md,
        _check_claude_dir,
        _check_cursor_rules,
        _check_copilot_config,
        _check_mcp_configs,
        _check_generated_freshness,
        _check_cross_tool_consistency,
        _check_gitignore_coverage,
    )
    for check in checks:
        check(repo, outcome)
    return outcome
594
+
595
+
596
def audit_ai_editor_configs(
    *,
    dev_root: Path | None = None,
    max_depth: int = 2,
    exclude_repo_globs: list[str] | None = None,
    only_with_configs: bool = True,
) -> tuple[dict[str, Any], list[str]]:
    """Audit AI editor configurations across repos and return a report.

    Args:
        dev_root: Root to scan (defaults to $DEV_DIR or ~/Documents/dev).
        max_depth: Max directory depth for repo discovery (clamped to 0..6).
        exclude_repo_globs: fnmatch globs of repo paths to skip.
        only_with_configs: If True, per-repo report sections include only
            repos where at least one AI tool config was detected.

    Returns:
        (report, errors): the JSON-serializable report dict and the list of
        per-repo audit error strings (also embedded in the report).
    """
    errors: list[str] = []
    root = dev_root if dev_root is not None else _default_dev_root()
    globs = [g for g in (exclude_repo_globs or []) if isinstance(g, str) and g.strip()]

    repos = _iter_git_repos(root, max_depth=max_depth, exclude_globs=globs)

    all_results: list[RepoAuditResult] = []
    for repo in repos:
        try:
            all_results.append(_audit_repo(repo))
        except Exception as exc:
            errors.append(f"failed to audit {repo}: {exc}")

    # BUG FIX: count config-less repos BEFORE filtering. Previously this was
    # computed after the only_with_configs filter, so with the default
    # only_with_configs=True it was always 0.
    repos_without_any_config = sum(1 for r in all_results if not r.ai_tools_detected)
    repos_with_configs = [r for r in all_results if r.ai_tools_detected]

    results = repos_with_configs if only_with_configs else all_results

    # Sort: public repos first, then by error count, warning count, name.
    results.sort(
        key=lambda r: (
            -r.is_public,
            -sum(1 for f in r.findings if f.severity == "error"),
            -sum(1 for f in r.findings if f.severity == "warning"),
            r.repo_name,
        )
    )

    # Summary aggregates.
    repos_with_errors = [r for r in results if any(f.severity == "error" for f in r.findings)]
    repos_with_warnings = [r for r in results if any(f.severity == "warning" for f in r.findings)]
    tool_counts: dict[str, int] = {}
    for r in results:
        for tool in r.ai_tools_detected:
            tool_counts[tool] = tool_counts.get(tool, 0) + 1
    check_counts: dict[str, int] = {}
    for r in results:
        for f in r.findings:
            check_counts[f.check] = check_counts.get(f.check, 0) + 1

    report: dict[str, Any] = {
        "generated_at": _utc_now(),
        "scope": {
            "dev_root": str(root),
            "repos_scanned": len(repos),
            "repos_with_ai_configs": len(repos_with_configs),
            "max_depth": max_depth,
            "only_with_configs": only_with_configs,
            "exclude_repo_globs": globs,
        },
        "summary": {
            "repos_with_errors": len(repos_with_errors),
            "repos_with_errors_list": [r.repo_name for r in repos_with_errors],
            "repos_with_warnings": len(repos_with_warnings),
            "repos_with_warnings_list": [r.repo_name for r in repos_with_warnings],
            "total_findings": sum(len(r.findings) for r in results),
            "findings_by_check": sorted(check_counts.items(), key=lambda x: -x[1]),
            "total_errors": sum(1 for r in results for f in r.findings if f.severity == "error"),
            "total_warnings": sum(
                1 for r in results for f in r.findings if f.severity == "warning"
            ),
            "tool_adoption": sorted(tool_counts.items(), key=lambda x: -x[1]),
            "repos_without_any_config": repos_without_any_config,
        },
        # Per-repo detail capped at 200 entries; repos with no findings are
        # listed separately under "clean_repos".
        "repos": [
            {
                "repo_path": r.repo_path,
                "repo_name": r.repo_name,
                "is_public": r.is_public,
                "ai_tools": r.ai_tools_detected,
                "findings": [
                    {
                        "check": f.check,
                        "severity": f.severity,
                        "message": f.message,
                        **({"detail": f.detail} if f.detail else {}),
                    }
                    for f in r.findings
                ],
            }
            for r in results
            if r.findings
        ][:200],
        "clean_repos": [r.repo_name for r in results if r.ai_tools_detected and not r.findings],
        "errors": errors,
    }
    return report, errors
693
+
694
+
695
def write_report(path: Path, report: dict[str, Any]) -> None:
    """Write *report* to *path* as pretty-printed JSON, creating parent dirs."""
    path.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(report, indent=2)
    path.write_text(serialized + "\n")