devguard 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devguard/INTEGRATION_SUMMARY.md +121 -0
- devguard/__init__.py +3 -0
- devguard/__main__.py +6 -0
- devguard/checkers/__init__.py +41 -0
- devguard/checkers/api_usage.py +523 -0
- devguard/checkers/aws_cost.py +331 -0
- devguard/checkers/aws_iam.py +284 -0
- devguard/checkers/base.py +25 -0
- devguard/checkers/container.py +137 -0
- devguard/checkers/domain.py +189 -0
- devguard/checkers/firecrawl.py +117 -0
- devguard/checkers/fly.py +225 -0
- devguard/checkers/github.py +210 -0
- devguard/checkers/npm.py +327 -0
- devguard/checkers/npm_security.py +244 -0
- devguard/checkers/redteam.py +290 -0
- devguard/checkers/secret.py +279 -0
- devguard/checkers/swarm.py +376 -0
- devguard/checkers/tailscale.py +143 -0
- devguard/checkers/tailsnitch.py +303 -0
- devguard/checkers/tavily.py +179 -0
- devguard/checkers/vercel.py +192 -0
- devguard/cli.py +1510 -0
- devguard/cli_helpers.py +189 -0
- devguard/config.py +249 -0
- devguard/core.py +293 -0
- devguard/dashboard.py +715 -0
- devguard/discovery.py +363 -0
- devguard/http_client.py +142 -0
- devguard/llm_service.py +481 -0
- devguard/mcp_server.py +259 -0
- devguard/metrics.py +144 -0
- devguard/models.py +208 -0
- devguard/reporting.py +1571 -0
- devguard/sarif.py +295 -0
- devguard/scripts/ANALYSIS_SUMMARY.md +141 -0
- devguard/scripts/README.md +221 -0
- devguard/scripts/auto_fix_recommendations.py +145 -0
- devguard/scripts/generate_npmignore.py +175 -0
- devguard/scripts/generate_security_report.py +324 -0
- devguard/scripts/prepublish_check.sh +29 -0
- devguard/scripts/redteam_npm_packages.py +1262 -0
- devguard/scripts/review_all_repos.py +300 -0
- devguard/spec.py +617 -0
- devguard/sweeps/__init__.py +23 -0
- devguard/sweeps/ai_editor_config_audit.py +697 -0
- devguard/sweeps/cargo_publish_audit.py +655 -0
- devguard/sweeps/dependency_audit.py +419 -0
- devguard/sweeps/gitignore_audit.py +336 -0
- devguard/sweeps/local_dev.py +260 -0
- devguard/sweeps/local_dirty_worktree_secrets.py +521 -0
- devguard/sweeps/project_flaudit.py +636 -0
- devguard/sweeps/public_github_secrets.py +680 -0
- devguard/sweeps/publish_audit.py +478 -0
- devguard/sweeps/ssh_key_audit.py +327 -0
- devguard/utils.py +174 -0
- devguard-0.2.0.dist-info/METADATA +225 -0
- devguard-0.2.0.dist-info/RECORD +60 -0
- devguard-0.2.0.dist-info/WHEEL +4 -0
- devguard-0.2.0.dist-info/entry_points.txt +2 -0
devguard/cli.py
ADDED
|
@@ -0,0 +1,1510 @@
|
|
|
1
|
+
"""CLI interface for Guardian."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import json
|
|
5
|
+
import logging
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
|
|
8
|
+
import typer
|
|
9
|
+
from rich.console import Console
|
|
10
|
+
from rich.prompt import Prompt
|
|
11
|
+
from rich.table import Table
|
|
12
|
+
|
|
13
|
+
from devguard.config import get_settings
|
|
14
|
+
from devguard.core import Guardian
|
|
15
|
+
from devguard.dashboard import run_dashboard
|
|
16
|
+
from devguard.reporting import Reporter
|
|
17
|
+
|
|
18
|
+
# Typer application object: all CLI subcommands are registered on this.
app = typer.Typer(
    help="Guardian - Unified monitoring for npm packages, GitHub repos, and deployments"
)
# Shared Rich console used by every command for styled terminal output.
console = Console()

# Maximum rows rendered per findings table before truncating with a "..." row.
_MAX_TABLE_ROWS = 15
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def _sev_style(severity: str) -> str:
|
|
27
|
+
"""Return a Rich style string for the given severity level."""
|
|
28
|
+
s = severity.lower()
|
|
29
|
+
if s in ("critical", "error", "high"):
|
|
30
|
+
return "red"
|
|
31
|
+
if s in ("warning", "medium"):
|
|
32
|
+
return "yellow"
|
|
33
|
+
return "dim"
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def _print_local_dev_table(hits: list) -> None:
    """Render local_dev sweep hits as a table (items may be objects or dicts)."""
    if not hits:
        return

    def _field(item, name: str):
        # Hits can be model objects (attribute access) or plain dicts.
        return getattr(item, name) if hasattr(item, name) else item.get(name, "")

    table = Table(title="local_dev findings", title_style="bold")
    table.add_column("repo_path", style="cyan", max_width=40)
    table.add_column("finding_type", style="yellow")
    table.add_column("file_path", max_width=50)
    for hit in hits[:_MAX_TABLE_ROWS]:
        reason = _field(hit, "reason")
        # The finding type is the prefix before the first colon, if any.
        finding_type = reason.split(":")[0] if ":" in reason else reason
        repo = _field(hit, "repo_path")
        table.add_row(
            Path(repo).name if repo else "",
            finding_type,
            _field(hit, "file_path"),
        )
    if len(hits) > _MAX_TABLE_ROWS:
        table.add_row("...", f"+{len(hits) - _MAX_TABLE_ROWS} more", "", style="dim")
    console.print(table)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def _print_public_github_secrets_table(report: dict) -> None:
    """Summarize public_github_secrets findings per repo, noting scan engines."""
    findings = report.get("findings", [])
    if not findings:
        return
    from collections import Counter

    per_repo: Counter = Counter()
    engines_by_repo: dict[str, set] = {}
    for finding in findings:
        repo = finding.get("repo", "?")
        per_repo[repo] += 1
        # Engine name is the prefix of the finding type before any colon.
        engine = (finding.get("type", "") or "").split(":")[0]
        engines_by_repo.setdefault(repo, set()).add(engine)

    table = Table(title="public_github_secrets findings", title_style="bold")
    table.add_column("repo_name", style="cyan")
    table.add_column("finding_count", justify="right", style="red")
    table.add_column("engine")
    for repo, count in per_repo.most_common(_MAX_TABLE_ROWS):
        engine_list = ", ".join(sorted(engines_by_repo.get(repo, set())))
        table.add_row(repo, str(count), engine_list)
    if len(per_repo) > _MAX_TABLE_ROWS:
        table.add_row("...", f"+{len(per_repo) - _MAX_TABLE_ROWS} repos", "", style="dim")
    console.print(table)
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def _print_local_dirty_worktree_table(report: dict) -> None:
    """Show a per-repo count of local_dirty_worktree_secrets findings."""
    findings = report.get("findings", [])
    if not findings:
        return
    from collections import Counter

    # Key by the repo directory's basename rather than the full path.
    counts: Counter = Counter(
        Path(f.get("repo_path", "?")).name for f in findings
    )
    table = Table(title="local_dirty_worktree_secrets findings", title_style="bold")
    table.add_column("repo_name", style="cyan")
    table.add_column("finding_count", justify="right", style="red")
    for name, count in counts.most_common(_MAX_TABLE_ROWS):
        table.add_row(name, str(count))
    if len(counts) > _MAX_TABLE_ROWS:
        table.add_row("...", f"+{len(counts) - _MAX_TABLE_ROWS} repos", style="dim")
    console.print(table)
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def _print_project_flaudit_table(results: list) -> None:
    """Render per-repo flaudit finding counts, highest counts first."""
    summary = []
    for result in results:
        if not result.findings:
            continue
        severities = {f.severity for f in result.findings}
        # Highest severity present, in descending order; default to "low".
        top = next(
            (s for s in ("critical", "high", "medium") if s in severities), "low"
        )
        summary.append((Path(result.repo_path).name, len(result.findings), top))
    if not summary:
        return
    summary.sort(key=lambda row: row[1], reverse=True)
    table = Table(title="project_flaudit findings", title_style="bold")
    table.add_column("repo_name", style="cyan")
    table.add_column("finding_count", justify="right")
    table.add_column("top_severity")
    for name, count, top in summary[:_MAX_TABLE_ROWS]:
        table.add_row(name, str(count), f"[{_sev_style(top)}]{top}[/]")
    if len(summary) > _MAX_TABLE_ROWS:
        table.add_row("...", f"+{len(summary) - _MAX_TABLE_ROWS} repos", "", style="dim")
    console.print(table)
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def _print_gitignore_audit_table(report: dict) -> None:
    """List repos missing recommended .gitignore patterns (first six shown each)."""
    repos = report.get("repos", [])
    if not repos:
        return
    table = Table(title="gitignore_audit findings", title_style="bold")
    table.add_column("repo_name", style="cyan")
    table.add_column("is_public")
    table.add_column("missing_patterns", max_width=60)
    for repo in repos[:_MAX_TABLE_ROWS]:
        patterns = repo.get("missing_patterns", [])
        shown = ", ".join(patterns[:6])
        if len(patterns) > 6:
            shown += f" (+{len(patterns) - 6})"
        table.add_row(
            Path(repo["repo_path"]).name,
            "[red]yes[/]" if repo.get("is_public") else "no",
            shown,
        )
    if len(repos) > _MAX_TABLE_ROWS:
        table.add_row("...", "", f"+{len(repos) - _MAX_TABLE_ROWS} repos", style="dim")
    console.print(table)
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
def _print_dependency_audit_table(report: dict) -> None:
    """Show vulnerable repos from dependency_audit with their top severity."""
    vulnerable = [r for r in report.get("repos", []) if r.get("vulns")]
    if not vulnerable:
        return
    table = Table(title="dependency_audit findings", title_style="bold")
    table.add_column("repo_name", style="cyan")
    table.add_column("severity")
    table.add_column("vuln_count", justify="right")
    for repo in vulnerable[:_MAX_TABLE_ROWS]:
        counts = repo.get("severity_counts", {})
        # Highest severity with a non-zero count, descending; default "low".
        top = next(
            (s for s in ("critical", "high", "medium", "low") if counts.get(s, 0) > 0),
            "low",
        )
        total = repo.get("vuln_count", len(repo.get("vulns", [])))
        table.add_row(
            Path(repo["repo_path"]).name,
            f"[{_sev_style(top)}]{top}[/]",
            str(total),
        )
    if len(vulnerable) > _MAX_TABLE_ROWS:
        table.add_row("...", "", f"+{len(vulnerable) - _MAX_TABLE_ROWS} repos", style="dim")
    console.print(table)
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
def _print_ssh_key_audit_table(report: dict) -> None:
    """Render ssh_key_audit issues, one row per issue, capped at _MAX_TABLE_ROWS."""
    flagged = [k for k in report.get("keys", []) if k.get("issues")]
    if not flagged:
        return
    table = Table(title="ssh_key_audit findings", title_style="bold")
    table.add_column("key_file", style="cyan", max_width=40)
    table.add_column("issue_type", style="yellow")
    table.add_column("detail", max_width=50)

    def _classify(issue: str) -> tuple:
        # Substring-based bucketing; the first matching category wins,
        # so ordering here is significant.
        if "passphrase" in issue:
            return "no_passphrase", "red"
        if "deprecated" in issue or "bit" in issue or "NIST" in issue:
            return "weak_algorithm", "red"
        if "permissions" in issue:
            return "bad_permissions", "yellow"
        if "GitHub" in issue:
            return "not_on_github", "yellow"
        return "other", "dim"

    rendered = 0
    for key in flagged:
        name = Path(key["key_path"]).name
        for issue in key["issues"]:
            if rendered >= _MAX_TABLE_ROWS:
                break
            issue_type, style = _classify(issue)
            table.add_row(f"[{style}]{name}[/]", issue_type, issue)
            rendered += 1
        if rendered >= _MAX_TABLE_ROWS:
            break
    total_issues = sum(len(k["issues"]) for k in flagged)
    if total_issues > _MAX_TABLE_ROWS:
        table.add_row("...", f"+{total_issues - _MAX_TABLE_ROWS} more", "", style="dim")
    console.print(table)
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
def _print_cargo_publish_audit_table(report: dict) -> None:
    """Render cargo_publish_audit findings across repos, capped at _MAX_TABLE_ROWS."""
    flagged = [r for r in report.get("repos", []) if r.get("findings")]
    if not flagged:
        return
    table = Table(title="cargo_publish_audit findings", title_style="bold")
    table.add_column("repo_name", style="cyan")
    table.add_column("check")
    table.add_column("severity")
    table.add_column("message", max_width=50)

    # Flatten (repo, finding) pairs lazily so the row cap is a single break.
    pairs = (
        (repo["repo_name"], finding)
        for repo in flagged
        for finding in repo.get("findings", [])
    )
    rendered = 0
    for repo_name, finding in pairs:
        if rendered >= _MAX_TABLE_ROWS:
            break
        severity = finding.get("severity", "info")
        table.add_row(
            repo_name,
            finding.get("check", ""),
            f"[{_sev_style(severity)}]{severity}[/]",
            finding.get("message", ""),
        )
        rendered += 1
    total = sum(len(r.get("findings", [])) for r in flagged)
    if total > _MAX_TABLE_ROWS:
        table.add_row("...", "", "", f"+{total - _MAX_TABLE_ROWS} more", style="dim")
    console.print(table)
|
|
238
|
+
|
|
239
|
+
|
|
240
|
+
def _print_ai_editor_config_table(report: dict) -> None:
    """Render ai_editor_config_audit findings across repos, capped at _MAX_TABLE_ROWS."""
    flagged = [r for r in report.get("repos", []) if r.get("findings")]
    if not flagged:
        return
    table = Table(title="ai_editor_config_audit findings", title_style="bold")
    table.add_column("repo_name", style="cyan")
    table.add_column("check")
    table.add_column("severity")
    table.add_column("message", max_width=50)

    # Materialize all (repo, finding) rows once; slice for display and use
    # the full length for the truncation footer.
    all_rows = [
        (repo["repo_name"], finding)
        for repo in flagged
        for finding in repo.get("findings", [])
    ]
    for repo_name, finding in all_rows[:_MAX_TABLE_ROWS]:
        severity = finding.get("severity", "info")
        table.add_row(
            repo_name,
            finding.get("check", ""),
            f"[{_sev_style(severity)}]{severity}[/]",
            finding.get("message", ""),
        )
    if len(all_rows) > _MAX_TABLE_ROWS:
        table.add_row("...", "", "", f"+{len(all_rows) - _MAX_TABLE_ROWS} more", style="dim")
    console.print(table)
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
def _configure_logging(json_output: bool = False) -> None:
|
|
271
|
+
"""Configure logging based on output mode.
|
|
272
|
+
|
|
273
|
+
In JSON mode, suppress INFO logs from httpx/httpcore to keep output clean.
|
|
274
|
+
"""
|
|
275
|
+
if json_output:
|
|
276
|
+
# Suppress verbose HTTP logs in JSON mode
|
|
277
|
+
logging.getLogger("httpx").setLevel(logging.WARNING)
|
|
278
|
+
logging.getLogger("httpcore").setLevel(logging.WARNING)
|
|
279
|
+
logging.getLogger("devguard").setLevel(logging.WARNING)
|
|
280
|
+
|
|
281
|
+
|
|
282
|
+
@app.command()
def check(
    json_output: bool = typer.Option(False, "--json", "-j", help="Output as JSON"),
    watch: bool = typer.Option(False, "--watch", "-w", help="Watch mode (continuous checks)"),
    interval: int | None = typer.Option(
        None, "--interval", "-i", help="Interval in seconds for watch mode"
    ),
    skip_validation: bool = typer.Option(
        False, "--skip-validation", help="Skip configuration validation"
    ),
    env_file: str | None = typer.Option(
        None,
        "--env-file",
        help="Optional env file path to load (overrides default env_file search).",
    ),
) -> None:
    """Run monitoring checks.

    Runs a single pass of all configured checkers and reports results.
    With --watch, repeats forever, sleeping --interval seconds (or the
    configured default) between passes. With --json, prints the raw report
    dict and suppresses noisy HTTP logging.

    Fix: ``interval`` was annotated ``int`` but defaulted to ``None``; it is
    now ``int | None`` to match the actual optional semantics (consistent
    with ``env_file``).
    """
    _configure_logging(json_output)
    settings = get_settings(env_file=env_file)
    guardian = Guardian(settings)
    reporter = Reporter(settings)

    # Surface configuration problems up front unless explicitly skipped.
    if not skip_validation:
        warnings = guardian.validate_configuration()
        if warnings:
            console.print("[bold yellow]Configuration Warnings:[/bold yellow]")
            for warning in warnings:
                console.print(f" ⚠ {warning}")
            console.print()

    async def run_check():
        report = await guardian.run_checks()

        if json_output:
            # NOTE: uses Reporter's private serializer to keep JSON output
            # structurally identical to the rich report.
            report_dict = reporter._report_to_dict(report)
            console.print(json.dumps(report_dict, indent=2))
        else:
            await reporter.report(report)

    if watch:
        # Watch mode with zero checkers would loop uselessly; refuse early.
        if not skip_validation and not guardian.checkers:
            console.print(
                "[bold red]Error: No checkers configured. Cannot run in watch mode.[/bold red]"
            )
            console.print("Configure at least one checker or use --skip-validation to proceed.")
            raise typer.Exit(code=1)

        interval_seconds = interval or settings.check_interval_seconds
        console.print(f"[bold]Watching with interval: {interval_seconds}s[/bold]\n")

        async def watch_loop():
            while True:
                await run_check()
                await asyncio.sleep(interval_seconds)

        asyncio.run(watch_loop())
    else:
        asyncio.run(run_check())
|
|
342
|
+
|
|
343
|
+
|
|
344
|
+
@app.command()
def config() -> None:
    """Print which services are configured and what each one monitors."""
    settings = get_settings()

    console.print("[bold blue]Guardian Configuration[/bold blue]\n")

    def _flag(enabled) -> str:
        # Checkmark when the credential/list is set (truthy), cross otherwise.
        return "✓" if enabled else "✗"

    console.print(f"GitHub: {_flag(settings.github_token)}")
    if settings.github_org:
        console.print(f" Organization: {settings.github_org}")
    if settings.github_repos_to_monitor:
        console.print(f" Repos: {', '.join(settings.github_repos_to_monitor)}")

    console.print(f"\nVercel: {_flag(settings.vercel_token)}")
    if settings.vercel_team_id:
        console.print(f" Team ID: {settings.vercel_team_id}")
    if settings.vercel_projects_to_monitor:
        console.print(f" Projects: {', '.join(settings.vercel_projects_to_monitor)}")

    console.print(f"\nFly.io: {_flag(settings.fly_api_token)}")
    if settings.fly_apps_to_monitor:
        console.print(f" Apps: {', '.join(settings.fly_apps_to_monitor)}")

    console.print(f"\nnpm: {_flag(settings.npm_packages_to_monitor)}")
    if settings.npm_packages_to_monitor:
        console.print(f" Packages: {', '.join(settings.npm_packages_to_monitor)}")
    if settings.snyk_token:
        console.print(" Snyk: ✓")
    if settings.npm_security_enabled:
        console.print(" Deep Security Analysis: ✓")
|
|
374
|
+
|
|
375
|
+
|
|
376
|
+
@app.command()
def auth(
    service: str = typer.Argument(..., help="Service to authenticate (gh, vercel, fly, snyk)"),
    token: str = typer.Option(
        None, "--token", "-t", help="Token value (if not provided, will prompt)"
    ),
    test: bool = typer.Option(False, "--test", help="Test the token after setting it"),
) -> None:
    """Authenticate with a service by setting API token.

    Writes (or updates in place) the matching environment variable in
    ./.env. With --test, the token is immediately exercised against the
    service API via ``test_service_token``.

    Fixes: the previous implementation used ``split("\\n")`` which keeps a
    trailing empty element, so every run inserted an extra blank line into
    .env; it also left the secret-bearing file world-readable.
    """
    service = service.lower()
    valid_services = ["gh", "github", "vercel", "fly", "snyk"]

    if service not in valid_services:
        console.print(f"[bold red]Error: Invalid service '{service}'[/bold red]")
        console.print(f"Valid services: {', '.join(valid_services)}")
        raise typer.Exit(code=1)

    # Prompt for the token if it was not passed on the command line.
    if not token:
        console.print(f"[bold]Setting up {service.upper()} authentication[/bold]")
        token = Prompt.ask(f"Enter your {service.upper()} token", password=True)
        if not token:
            console.print("[bold red]Error: Token cannot be empty[/bold red]")
            raise typer.Exit(code=1)

    # Map service names to the env var each checker reads.
    env_var_map = {
        "gh": "GITHUB_TOKEN",
        "github": "GITHUB_TOKEN",  # Alias for backwards compatibility
        "vercel": "VERCEL_TOKEN",
        "fly": "FLY_API_TOKEN",
        "snyk": "SNYK_TOKEN",
    }
    env_var = env_var_map[service]

    env_file = Path(".env")

    # splitlines() (unlike split("\n")) yields no trailing empty entry, so
    # repeated runs no longer accumulate blank lines at the end of .env.
    lines = env_file.read_text().splitlines() if env_file.exists() else []

    # Update the variable in place, or append it if absent.
    updated = False
    new_lines = []
    for line in lines:
        if line.startswith(f"{env_var}="):
            new_lines.append(f"{env_var}={token}")
            updated = True
        else:
            new_lines.append(line)

    if not updated:
        new_lines.append(f"{env_var}={token}")

    # Write back, then restrict permissions: the file now holds a secret.
    env_file.write_text("\n".join(new_lines) + "\n")
    try:
        env_file.chmod(0o600)
    except OSError:
        pass  # Best effort; some filesystems/platforms don't support it.

    console.print(f"[bold green]✓[/bold green] {service.upper()} token saved to .env file")

    # Optionally verify the token actually works against the live API.
    if test:
        console.print(f"\n[bold]Testing {service.upper()} token...[/bold]")
        from devguard.cli_helpers import test_service_token

        success, message = asyncio.run(test_service_token(service, token))
        if success:
            console.print(f"[bold green]✓[/bold green] {message}")
        else:
            console.print(f"[bold red]✗[/bold red] {message}")
            console.print("[yellow]Token saved but test failed. Please verify manually.[/yellow]")

    console.print(
        "\n[bold]Note:[/bold] Restart Guardian or reload environment to use the new token."
    )
|
|
454
|
+
|
|
455
|
+
|
|
456
|
+
@app.command()
def mcp() -> None:
    """Start the Guardian MCP server.

    Blocks until the server process is stopped; the import is deferred so
    the MCP dependency is only loaded when this command is invoked.
    """
    from devguard.mcp_server import run_mcp_server

    console.print("[bold green]Starting Guardian MCP Server...[/bold green]")
    run_mcp_server()
|
|
463
|
+
|
|
464
|
+
|
|
465
|
+
@app.command()
def auth_status() -> None:
    """Show authentication status for all services.

    Thin wrapper: loads settings and delegates rendering to
    ``cli_helpers.show_auth_status`` (imported lazily to keep startup light).
    """
    from devguard.cli_helpers import show_auth_status

    settings = get_settings()
    show_auth_status(settings)
|
|
472
|
+
|
|
473
|
+
|
|
474
|
+
@app.command()
def dashboard(
    host: str = typer.Option(None, "--host", help="Host to bind to"),
    port: int = typer.Option(None, "--port", help="Port to bind to"),
) -> None:
    """Start the web dashboard server."""
    settings = get_settings()

    # Warn if the dashboard is disabled in config and no --host override given.
    if not settings.dashboard_enabled and not host:
        console.print("[bold yellow]Warning:[/bold yellow] Dashboard is not enabled in config.")
        console.print("Set DASHBOARD_ENABLED=true or use --host/--port to override.")
        console.print()

    if settings.dashboard_api_key:
        console.print("[bold green]✓[/bold green] Dashboard API key configured")
    else:
        # Without an API key the dashboard is open — flag it loudly.
        console.print(
            "[bold yellow]⚠[/bold yellow] DASHBOARD_API_KEY not set - "
            "dashboard will be accessible without authentication (development mode)"
        )
        console.print("Generate a key with: [dim]openssl rand -hex 32[/dim]")
        console.print()

    # CLI flags take precedence over configured defaults for the banner URL.
    bind_host = host or settings.dashboard_host
    bind_port = port or settings.dashboard_port
    console.print("[bold]Starting Guardian dashboard...[/bold]")
    console.print(f"Access at: http://{bind_host}:{bind_port}")
    console.print()

    try:
        run_dashboard(host=host, port=port)
    except KeyboardInterrupt:
        console.print("\n[bold]Dashboard stopped[/bold]")
        raise typer.Exit(0)

    console.print()
|
|
509
|
+
|
|
510
|
+
|
|
511
|
+
@app.command()
def spec(
    init: bool = typer.Option(False, "--init", help="Create a new spec file interactively"),
    from_env: bool = typer.Option(False, "--from-env", help="Generate spec from current .env"),
    edit: bool = typer.Option(False, "--edit", help="Open spec file in editor"),
) -> None:
    """Manage monitoring specifications.

    --init builds devguard.spec.yaml interactively; --from-env derives one
    from the current settings; --edit opens it in $EDITOR; with no flags,
    the current spec is summarized.

    Fix: the interactive answers were previously read back via
    ``locals().get(f"discover_{key}")`` — an opaque hack that breaks
    silently if a variable is renamed. They are now collected in an
    explicit dict keyed the same way as ``rule_map``.
    """
    from pathlib import Path

    from devguard.spec import MonitorSpec, get_default_spec, load_spec

    spec_file = Path("devguard.spec.yaml")

    if init:
        if spec_file.exists():
            if not typer.confirm(f"{spec_file} already exists. Overwrite?", default=False):
                console.print("[yellow]Cancelled[/yellow]")
                return

        console.print("[bold]Creating new monitoring spec...[/bold]")
        console.print()

        # Ask basic questions
        name = Prompt.ask("Spec name", default="default")
        description = Prompt.ask("Description (optional)", default="")

        # Ask what to discover. Answers live in a dict keyed identically to
        # rule_map below, so the lookup is explicit and rename-safe.
        console.print("\n[bold]What should Guardian discover?[/bold]")
        discover_flags = {
            "npm": typer.confirm(" Discover npm packages?", default=True),
            "github": typer.confirm(" Discover GitHub repos?", default=True),
            "vercel": typer.confirm(" Discover Vercel projects?", default=True),
            "fly": typer.confirm(" Discover Fly.io apps?", default=True),
            "domains": typer.confirm(" Discover domains from configs?", default=False),
            "commits": typer.confirm(" Track GitHub commits?", default=False),
            "mentions": typer.confirm(" Track GitHub mentions?", default=False),
        }

        # Build spec
        spec = MonitorSpec(name=name, description=description or None)
        default_spec = get_default_spec()

        # Which default-spec discovery rules each answer enables.
        rule_map = {
            "npm": ["npm_list", "npm_package_json"],
            "github": ["github_repos"],
            "vercel": ["vercel_projects"],
            "fly": ["fly_apps"],
            "domains": ["domains"],
            "commits": ["github_commits"],
            "mentions": ["github_mentions"],
        }

        for key, rule_names in rule_map.items():
            if not discover_flags.get(key, False):
                continue
            for rule_name in rule_names:
                rule = next(
                    (r for r in default_spec.discovery_rules if r.name == rule_name), None
                )
                if rule:
                    spec.discovery_rules.append(rule)

        # Always include username discovery (needed for mentions/commits)
        username_rule = next(
            (r for r in default_spec.discovery_rules if r.name == "github_username"), None
        )
        if username_rule:
            spec.discovery_rules.append(username_rule)

        # Save spec
        import yaml

        spec_dict = spec.model_dump(exclude_none=True)
        with open(spec_file, "w") as f:
            yaml.dump(spec_dict, f, default_flow_style=False, sort_keys=False)

        console.print(f"\n[bold green]✓[/bold green] Created {spec_file}")
        console.print("[dim]Edit it to customize discovery rules[/dim]")

    elif from_env:
        # Generate spec from current .env
        from devguard.config import get_settings

        settings = get_settings()
        spec = MonitorSpec(name="from_env", description="Generated from current .env")

        # Add rules based on what's configured
        default_spec = get_default_spec()

        if settings.npm_packages_to_monitor:
            spec.manual_resources["npm"] = settings.npm_packages_to_monitor
            # Also enable discovery
            spec.discovery_rules.extend(
                [r for r in default_spec.discovery_rules if "npm" in r.name]
            )

        if settings.github_token:
            if settings.github_repos_to_monitor:
                spec.manual_resources["github"] = settings.github_repos_to_monitor
            spec.discovery_rules.extend(
                [r for r in default_spec.discovery_rules if "github" in r.name]
            )

        if settings.vercel_token:
            if settings.vercel_projects_to_monitor:
                spec.manual_resources["vercel"] = settings.vercel_projects_to_monitor
            spec.discovery_rules.extend(
                [r for r in default_spec.discovery_rules if "vercel" in r.name]
            )

        if settings.fly_api_token:
            if settings.fly_apps_to_monitor:
                spec.manual_resources["fly"] = settings.fly_apps_to_monitor
            spec.discovery_rules.extend(
                [r for r in default_spec.discovery_rules if "fly" in r.name]
            )

        # Save
        import yaml

        spec_dict = spec.model_dump(exclude_none=True)
        with open(spec_file, "w") as f:
            yaml.dump(spec_dict, f, default_flow_style=False, sort_keys=False)

        console.print(f"[bold green]✓[/bold green] Generated {spec_file} from current .env")

    elif edit:
        import os
        import subprocess

        editor = os.environ.get("EDITOR", "nano")
        try:
            subprocess.run([editor, str(spec_file)])
        except Exception as e:
            console.print(f"[bold red]Error opening editor: {e}[/bold red]")
            console.print(f"Edit {spec_file} manually")

    else:
        # Show current spec
        if spec_file.exists():
            try:
                spec = load_spec(spec_file)
                console.print(f"[bold blue]Current Spec: {spec.name}[/bold blue]")
                if spec.description:
                    console.print(f"[dim]{spec.description}[/dim]")
                console.print(f"\nDiscovery Rules: {len(spec.discovery_rules)}")
                for rule in spec.discovery_rules:
                    status = "✓" if rule.enabled else "○"
                    console.print(f" {status} {rule.name} ({rule.type})")
                if spec.manual_resources:
                    console.print("\nManual Resources:")
                    for rtype, resources in spec.manual_resources.items():
                        console.print(f" {rtype}: {len(resources)} items")
            except Exception as e:
                console.print(f"[bold red]Error loading spec: {e}[/bold red]")
        else:
            console.print(f"[yellow]No spec file found: {spec_file}[/yellow]")
            console.print("Run [bold]devguard spec --init[/bold] to create one")
|
|
668
|
+
|
|
669
|
+
|
|
670
|
+
@app.command()
def discover(
    spec_file: str = typer.Option(
        "devguard.spec.yaml", "--spec", "-s", help="Path to monitoring spec file"
    ),
    base_path: str = typer.Option(
        None, "--base-path", "-b", help="Base path for file scanning (default: ~/Documents/dev)"
    ),
    json_output: bool = typer.Option(False, "--json", "-j", help="Output as JSON"),
    update_env: bool = typer.Option(
        False, "--update-env", help="Update .env file with discovered resources"
    ),
    env_file: str = typer.Option(
        ".env",
        "--env-file",
        help="Env file path to write when using --update-env (default: .env).",
    ),
) -> None:
    """Auto-discover resources to monitor based on spec.

    Loads the monitoring spec (falling back to the built-in default when the
    file is missing, aborting when it exists but fails to parse), runs
    ``discover_all`` over the base path, and prints the results either as JSON
    or as a Rich table. With --update-env the discovered resource lists are
    written into the env file as comma-separated variables.
    """
    from pathlib import Path

    from devguard.discovery import discover_all
    from devguard.spec import load_spec

    # Load spec: missing file -> default spec; unparseable file -> abort.
    spec_path = Path(spec_file)
    if spec_path.exists():
        try:
            spec = load_spec(spec_path)
        except Exception as e:
            console.print(f"[bold red]Error loading spec: {e}[/bold red]")
            raise typer.Exit(1)
    else:
        console.print(f"[bold yellow]Spec file not found: {spec_path}[/bold yellow]")
        console.print("Using default spec...")
        from devguard.spec import get_default_spec

        spec = get_default_spec()

    # Determine base path for file scanning.
    base = Path(base_path) if base_path else Path.home() / "Documents" / "dev"

    console.print(f"[bold]Discovering resources from: {base}[/bold]")
    console.print(f"[dim]Using spec: {spec.name}[/dim]\n")

    # discover_all is a coroutine; drive it directly (no wrapper needed).
    result = asyncio.run(discover_all(spec, base))

    if json_output:
        console.print(json.dumps(result.to_dict(), indent=2))
    else:
        # Display results as a Rich table, one row per resource type.
        from rich.table import Table

        console.print("[bold blue]Discovery Results[/bold blue]\n")

        if result.resources:
            table = Table(title="Discovered Resources")
            table.add_column("Type", style="cyan")
            table.add_column("Count", style="magenta")
            table.add_column("Examples", style="green")

            for resource_type, resources in result.resources.items():
                count = len(resources)
                # Show at most three truncated examples per type.
                examples = ", ".join(str(r)[:50] for r in resources[:3])
                if count > 3:
                    examples += f" ... (+{count - 3} more)"
                table.add_row(resource_type, str(count), examples)

            console.print(table)
            console.print()

        if result.errors:
            console.print("[bold red]Errors:[/bold red]")
            for error in result.errors:
                console.print(f"  • {error}")
            console.print()

    # Update the env file if requested.
    if update_env:
        env_path = Path(env_file)
        env_content = env_path.read_text() if env_path.exists() else ""

        # Map each discovered resource type to its env variable; cap at 20
        # entries per variable to keep the file manageable.
        env_var_by_type = {
            "npm": "NPM_PACKAGES_TO_MONITOR",
            "github": "GITHUB_REPOS_TO_MONITOR",
            "vercel": "VERCEL_PROJECTS_TO_MONITOR",
            "fly": "FLY_APPS_TO_MONITOR",
        }
        for rtype, var_name in env_var_by_type.items():
            if rtype in result.resources:
                env_content = _update_env_var(
                    env_content, var_name, ",".join(result.resources[rtype][:20])
                )

        env_path.write_text(env_content)
        # Report the actual path written (may differ from ".env" via --env-file).
        console.print(
            f"[bold green]✓[/bold green] Updated {env_path} with discovered resources"
        )
|
|
789
|
+
|
|
790
|
+
|
|
791
|
+
@app.command()
def stats(
    live_mode: bool = typer.Option(
        False, "--live", "-l", help="Live updating stats (refresh every 5s)"
    ),
) -> None:
    """Show current monitoring statistics in a TUI.

    Builds a Rich layout (header, summary panel, per-check table, cost
    breakdown) from a fresh Guardian check run. With --live the layout is
    re-rendered every 5 seconds until Ctrl-C; otherwise a single snapshot is
    printed. Note: each refresh re-runs all Guardian checks.
    """
    from rich.console import Console
    from rich.layout import Layout
    from rich.live import Live
    from rich.panel import Panel
    from rich.table import Table

    # Fresh local Console (intentionally shadows the module-level one).
    console = Console()

    def generate_stats() -> Layout:
        """Generate the stats layout."""
        settings = get_settings()
        guardian = Guardian(settings)

        # Run checks to get current stats
        import asyncio

        report = asyncio.run(guardian.run_checks())

        # Create layout
        layout = Layout()

        # Header
        header = Panel.fit(
            "[bold blue]🛡️ Guardian Monitoring Stats[/bold blue]",
            border_style="blue",
        )

        # Summary table: two-column metric/value pairs, no header row.
        summary_table = Table(show_header=False, box=None, padding=(0, 2))
        summary_table.add_column("Metric", style="cyan", width=25)
        summary_table.add_column("Value", style="magenta")

        # report.summary is read via .get() with 0 defaults, so missing keys
        # render as zeros rather than raising.
        summary = report.summary
        summary_table.add_row("Total Checks", str(summary.get("total_checks", 0)))
        summary_table.add_row("Successful", f"[green]{summary.get('successful_checks', 0)}[/green]")
        summary_table.add_row("Failed", f"[red]{summary.get('failed_checks', 0)}[/red]")
        summary_table.add_row("", "")
        summary_table.add_row("Vulnerabilities", str(summary.get("total_vulnerabilities", 0)))
        summary_table.add_row(
            "Critical", f"[red]{summary.get('critical_vulnerabilities', 0)}[/red]"
        )
        summary_table.add_row(
            "Unhealthy Deployments", f"[yellow]{summary.get('unhealthy_deployments', 0)}[/yellow]"
        )
        summary_table.add_row("Repository Alerts", str(summary.get("open_repository_alerts", 0)))
        summary_table.add_row("", "")
        summary_table.add_row(
            "Total Cost (USD)",
            f"[bold yellow]${summary.get('total_cost_usd', 0):.2f}[/bold yellow]",
        )

        summary_panel = Panel(summary_table, title="Summary", border_style="blue")

        # Checks table: one row per check result.
        checks_table = Table(title="Check Results")
        checks_table.add_column("Service", style="cyan")
        checks_table.add_column("Status", style="magenta")
        checks_table.add_column("Vulns", justify="right")
        checks_table.add_column("Deployments", justify="right")
        checks_table.add_column("Cost", justify="right", style="yellow")

        for check in report.checks:
            status_icon = "[green]✓[/green]" if check.success else "[red]✗[/red]"
            vuln_count = len(check.vulnerabilities)
            deploy_count = len(check.deployments)
            # `or 0` guards against None amounts in cost metrics.
            cost = sum(cm.amount or 0 for cm in check.cost_metrics)
            cost_str = f"${cost:.2f}" if cost > 0 else "-"

            checks_table.add_row(
                check.check_type.upper(),
                status_icon,
                str(vuln_count),
                str(deploy_count),
                cost_str,
            )

        checks_panel = Panel(checks_table, title="Service Checks", border_style="green")

        # Cost breakdown: one row per cost metric across all checks.
        cost_table = Table(title="Cost Breakdown", show_header=False, box=None)
        cost_table.add_column("Service", style="cyan")
        cost_table.add_column("Amount", justify="right", style="yellow")
        cost_table.add_column("Usage", style="green")

        all_cost_metrics = report.get_cost_metrics()
        if all_cost_metrics:
            for cm in all_cost_metrics:
                amount_str = f"${cm.amount:.2f}" if cm.amount else "$0.00"
                # Show "usage/limit" when a limit exists, bare usage otherwise.
                usage_str = f"{cm.usage:.0f}/{cm.limit:.0f}" if cm.limit else f"{cm.usage:.0f}"
                usage_pct = f"({cm.usage_percent:.1f}%)" if cm.usage_percent else ""
                cost_table.add_row(cm.service, amount_str, f"{usage_str} {usage_pct}")
        else:
            cost_table.add_row("No cost data", "-", "-")

        cost_panel = Panel(cost_table, title="Cost Metrics", border_style="yellow")

        # Layout structure: fixed-height header, fixed summary, flexible rest.
        layout.split_column(
            Layout(header, size=3),
            Layout(summary_panel, name="summary"),
            Layout(checks_panel, name="checks"),
            Layout(cost_panel, name="costs"),
        )

        layout["summary"].size = 12
        layout["checks"].size = None
        layout["costs"].size = None

        return layout

    if live_mode:
        # Live updating mode: manual update() calls every 5s drive the
        # refresh; refresh_per_second only bounds Rich's own repaint rate.
        with Live(generate_stats(), refresh_per_second=0.2, screen=True) as live_view:
            import time

            while True:
                try:
                    time.sleep(5)
                    live_view.update(generate_stats())
                except KeyboardInterrupt:
                    # Ctrl-C exits live mode cleanly.
                    break
    else:
        # Single snapshot
        console.print(generate_stats())
|
|
922
|
+
|
|
923
|
+
|
|
924
|
+
@app.command("sweep-dev")
def sweep_dev(
    dev_root: str = typer.Option(
        None,
        "--dev-root",
        help="Dev workspace root (default: $DEV_DIR or ~/Documents/dev).",
    ),
    output: str = typer.Option(
        "devguard_sweep_dev.json",
        "--output",
        "-o",
        help="Where to write the JSON report.",
    ),
    max_blob_mb: int = typer.Option(
        5,
        "--max-blob-mb",
        help="Flag tracked files larger than this many MiB (working tree size).",
    ),
    max_depth: int = typer.Option(
        2,
        "--max-depth",
        help="How deep under dev_root to look for git repos (bounded).",
    ),
) -> None:
    """Sweep local dev repos for likely accidental committed artifacts."""
    from devguard.sweeps.local_dev import default_dev_root, sweep_dev_repos, write_report

    # Resolve inputs up front: workspace root, report destination, size cap.
    workspace = Path(dev_root).expanduser() if dev_root else default_dev_root()
    destination = Path(output).expanduser()
    blob_limit_bytes = max_blob_mb * 1024 * 1024

    findings, metadata = sweep_dev_repos(
        workspace,
        max_blob_bytes=blob_limit_bytes,
        max_depth=max_depth,
    )
    write_report(destination, findings, metadata)

    console.print(f"[bold]Wrote report:[/bold] {destination}")
    console.print(f"[bold]Repos scanned:[/bold] {metadata['repos_scanned']}")
    console.print(f"[bold]Findings:[/bold] {len(findings)}")
    _print_local_dev_table(findings)

    # A clean sweep exits normally; anything flagged is a policy failure.
    if not findings:
        return
    console.print(
        "[bold yellow]Action:[/bold yellow] Review report and clean up flagged files."
    )
    raise typer.Exit(code=2)
|
|
971
|
+
|
|
972
|
+
|
|
973
|
+
@app.command()
def sweep(
    spec_file: str = typer.Option(
        "devguard.spec.yaml",
        "--spec",
        "-s",
        help="Path to spec file (drives which sweeps run and their policy).",
    ),
    dev_root: str = typer.Option(
        None,
        "--dev-root",
        help="Dev workspace root (default: $DEV_DIR or ~/Documents/dev).",
    ),
    only: list[str] = typer.Option(
        None,
        "--only",
        help="Run only these sweeps (repeatable). Known: local_dev, public_github_secrets, local_dirty_worktree_secrets, project_flaudit, gitignore_audit, dependency_audit, ssh_key_audit, cargo_publish_audit, ai_editor_config_audit",
    ),
    format: str = typer.Option(
        "text",
        "--format",
        "-f",
        help="Output format: text, json, sarif",
    ),
) -> None:
    """Run spec-driven sweeps (policy checks).

    Today this runs the local dev repo sweep (and will expand over time).

    Each enabled sweep writes its own JSON report to the path configured in
    the spec. In text mode a human summary is printed per sweep; in
    json/sarif mode stdout is reserved for the combined machine-readable
    document and per-sweep summaries are suppressed. Exit codes: 0 clean;
    2 when any sweep produced findings; 3 when the public GitHub secrets
    sweep hit scan errors and fail_on_errors is set.
    """
    import sys

    from devguard.spec import MonitorSpec, SweepSpec, load_spec
    from devguard.sweeps.local_dev import DEFAULT_DENY_GLOBS, default_dev_root, sweep_dev_repos
    from devguard.sweeps.local_dirty_worktree_secrets import (
        scan_dirty_worktrees,
    )
    from devguard.sweeps.local_dirty_worktree_secrets import (
        write_report as write_dirty_json,
    )
    from devguard.sweeps.public_github_secrets import scan_public_github_repos
    from devguard.sweeps.public_github_secrets import write_report as write_json

    # NOTE: the `format` parameter shadows the builtin; kept as-is because the
    # name is part of the CLI's keyword interface.
    machine_output = format in ("json", "sarif")
    sweep_reports: list[tuple[str, dict]] = []

    # Status messages go to stderr so json/sarif stdout stays parseable.
    stderr_console = Console(stderr=True)
    spec_path = Path(spec_file)
    if not spec_path.exists():
        stderr_console.print(
            "No spec file found; using defaults. Create devguard.spec.yaml to customize."
        )
        spec = MonitorSpec(
            name="default",
            discovery_rules=[],
            manual_resources={},
            filters={},
            sweeps=SweepSpec(),
        )
    else:
        spec = load_spec(spec_path)

    # Normalize --only values; an empty set means "run everything enabled".
    wanted = {w.strip() for w in (only or []) if w and w.strip()}

    # Highest-severity exit code across all sweeps wins (via max()).
    exit_code = 0

    # local-dev sweep
    local = spec.sweeps.local_dev
    if local.enabled and (not wanted or "local_dev" in wanted):
        root = Path(dev_root).expanduser() if dev_root else default_dev_root()
        # Spec deny-globs extend (never replace) the built-in defaults.
        deny = list(DEFAULT_DENY_GLOBS) + list(local.deny_globs or [])
        hits, meta = sweep_dev_repos(
            root,
            deny_globs=deny,
            max_blob_bytes=local.max_blob_mb * 1024 * 1024,
            max_depth=local.max_depth,
        )
        out_path = Path(local.output).expanduser()
        from devguard.sweeps.local_dev import write_report

        write_report(out_path, hits, meta)
        if machine_output:
            from dataclasses import asdict

            # Hits are dataclasses; serialize them for the combined document.
            sweep_reports.append(
                (
                    "local_dev",
                    {**meta, "hits": [asdict(h) for h in hits]},
                )
            )
        else:
            console.print(f"[bold]local_dev report:[/bold] {out_path}")
            console.print(f"[bold]Repos scanned:[/bold] {meta['repos_scanned']}")
            console.print(f"[bold]Findings:[/bold] {len(hits)}")
            _print_local_dev_table(hits)
        if hits:
            exit_code = max(exit_code, 2)

    # public-github-secrets sweep
    pub = spec.sweeps.public_github_secrets
    if pub.enabled and (not wanted or "public_github_secrets" in wanted):
        report, errors = scan_public_github_repos(
            owners=pub.owners,
            include_repos=pub.include_repos,
            exclude_repos=pub.exclude_repos,
            include_forks=pub.include_forks,
            max_repos=pub.max_repos,
            # getattr guards older spec objects without an `engines` field.
            engines=getattr(pub, "engines", None),
            timeout_s=pub.timeout_s,
            max_concurrency=pub.max_concurrency,
        )
        out_path = Path(pub.output).expanduser()
        write_json(out_path, report)
        if machine_output:
            sweep_reports.append(("public_github_secrets", report))
        else:
            console.print(f"[bold]public_github_secrets report:[/bold] {out_path}")
            console.print(f"[bold]Repos scanned:[/bold] {report['scope']['repos_scanned_count']}")
            console.print(f"[bold]Findings:[/bold] {report['summary']['findings_total']}")
            if errors:
                console.print(f"[yellow]Errors:[/yellow] {len(errors)} (see report)")
                if pub.fail_on_errors:
                    console.print(
                        "[bold red]Action:[/bold red] Fix scan errors (missed coverage) and rerun."
                    )
            _print_public_github_secrets_table(report)
        # Scan errors with fail_on_errors escalate to 3 (missed coverage).
        if errors and pub.fail_on_errors:
            exit_code = max(exit_code, 3)
        if report["summary"]["findings_total"] > 0:
            exit_code = max(exit_code, 2)

    # local dirty worktree secret sweep
    dirty = spec.sweeps.local_dirty_worktree_secrets
    if dirty.enabled and (not wanted or "local_dirty_worktree_secrets" in wanted):
        root = Path(dirty.dev_root).expanduser() if dirty.dev_root else default_dev_root()
        report, errors = scan_dirty_worktrees(
            dev_root=root,
            max_depth=dirty.max_depth,
            only_dirty=dirty.only_dirty,
            exclude_repo_globs=dirty.exclude_repo_globs,
            check_upstream=dirty.check_upstream,
            fetch_remotes=dirty.fetch_remotes,
            # getattr defaults cover spec objects predating these fields.
            max_paths_per_repo=getattr(dirty, "max_paths_per_repo", 50),
            include_ignored_files=getattr(dirty, "include_ignored_files", False),
            max_concurrency=dirty.max_concurrency,
            timeout_s=dirty.timeout_s,
        )
        out_path = Path(dirty.output).expanduser()
        write_dirty_json(out_path, report)
        if machine_output:
            sweep_reports.append(("local_dirty_worktree_secrets", report))
        else:
            console.print(f"[bold]local_dirty_worktree_secrets report:[/bold] {out_path}")
            console.print(f"[bold]Repos scanned:[/bold] {report['scope']['repos_scanned_count']}")
            console.print(f"[bold]Findings:[/bold] {report['summary']['findings_total']}")
            if errors:
                console.print(f"[yellow]Errors:[/yellow] {len(errors)} (see report)")
            _print_local_dirty_worktree_table(report)
        if report["summary"]["findings_total"] > 0:
            exit_code = max(exit_code, 2)

    # project_flaudit sweep (files-to-prompt + OpenRouter/Gemini)
    flaudit = spec.sweeps.project_flaudit
    if flaudit.enabled and (not wanted or "project_flaudit" in wanted):
        from devguard.sweeps.local_dev import default_dev_root
        from devguard.sweeps.project_flaudit import scan_project_flaudit
        from devguard.sweeps.project_flaudit import write_report as write_flaudit

        root = Path(flaudit.dev_root).expanduser() if flaudit.dev_root else default_dev_root()
        settings = get_settings()
        # Resolve the workspace-rules dir: relative paths anchor at the dev
        # root, and a non-directory result is dropped (treated as "no rules").
        wr_path = None
        if flaudit.workspace_rules_path:
            p = Path(flaudit.workspace_rules_path).expanduser()
            if not p.is_absolute():
                p = root / p
            wr_path = str(p.resolve()) if p.resolve().is_dir() else None
        results, meta = scan_project_flaudit(
            dev_root=root,
            k_recent=flaudit.k_recent,
            max_depth=flaudit.max_depth,
            model_id=flaudit.model_id,
            settings=settings,
            max_prompt_chars=flaudit.max_prompt_chars,
            include_rules=flaudit.include_rules,
            exclude_repo_globs=flaudit.exclude_repo_globs,
            workspace_rules_path=wr_path,
            workspace_rules_include=flaudit.workspace_rules_include or None,
            max_workspace_rules_chars=flaudit.max_workspace_rules_chars,
            severity_guidance=flaudit.severity_guidance,
            depth_0_skip_prefixes=flaudit.depth_0_skip_prefixes,
            depth_0_allow_names=flaudit.depth_0_allow_names,
            scope_recent_commits=flaudit.scope_recent_commits,
            public_repo_names=flaudit.public_repo_names or None,
            stricter_public_prompt=flaudit.stricter_public_prompt,
        )
        out_path = Path(flaudit.output).expanduser()
        write_flaudit(out_path, results, meta)
        total_findings = sum(len(r.findings) for r in results)
        total_errors = sum(1 for r in results if r.error)
        if machine_output:
            # Round-trip through the written file so the combined document
            # uses the same JSON shape as the on-disk report.
            sweep_reports.append(
                (
                    "project_flaudit",
                    json.loads(out_path.read_text()),
                )
            )
        else:
            console.print(f"[bold]project_flaudit report:[/bold] {out_path}")
            console.print(f"[bold]Projects analyzed:[/bold] {meta['repos_scanned']}")
            console.print(f"[bold]Findings:[/bold] {total_findings}")
            if total_errors:
                console.print(f"[bold]Errors:[/bold] {total_errors}", style="yellow")
            _print_project_flaudit_table(results)
        if total_findings > 0:
            exit_code = max(exit_code, 2)

    # gitignore audit sweep
    gi = spec.sweeps.gitignore_audit
    if gi.enabled and (not wanted or "gitignore_audit" in wanted):
        from devguard.sweeps.gitignore_audit import audit_gitignores
        from devguard.sweeps.gitignore_audit import write_report as write_gi

        root = Path(gi.dev_root).expanduser() if gi.dev_root else default_dev_root()
        report, errors = audit_gitignores(
            dev_root=root,
            max_depth=gi.max_depth,
            exclude_repo_globs=gi.exclude_repo_globs,
        )
        out_path = Path(gi.output).expanduser()
        write_gi(out_path, report)
        if machine_output:
            sweep_reports.append(("gitignore_audit", report))
        else:
            console.print(f"[bold]gitignore_audit report:[/bold] {out_path}")
            console.print(f"[bold]Repos scanned:[/bold] {report['scope']['repos_scanned']}")
            console.print(
                f"[bold]Repos without .gitignore:[/bold]"
                f" {report['summary']['repos_without_gitignore']}"
            )
            console.print(
                f"[bold]Public repos with gaps:[/bold]"
                f" {report['summary']['public_repos_with_gaps']}"
            )
            console.print(f"[bold]Total gaps:[/bold] {report['summary']['total_gaps']}")
            if errors:
                console.print(f"[yellow]Errors:[/yellow] {len(errors)} (see report)")
            _print_gitignore_audit_table(report)
        # Only gaps in *public* repos are a failure; private-repo gaps are not.
        if report["summary"]["public_repos_with_gaps"] > 0:
            exit_code = max(exit_code, 2)

    # dependency audit sweep
    depaudit = spec.sweeps.dependency_audit
    if depaudit.enabled and (not wanted or "dependency_audit" in wanted):
        from devguard.sweeps.dependency_audit import audit_dependencies
        from devguard.sweeps.dependency_audit import write_report as write_depaudit

        root = Path(depaudit.dev_root).expanduser() if depaudit.dev_root else default_dev_root()
        report, errors = audit_dependencies(
            dev_root=root,
            max_depth=depaudit.max_depth,
            exclude_repo_globs=depaudit.exclude_repo_globs,
            engines=depaudit.engines,
            max_concurrency=depaudit.max_concurrency,
            timeout_s=depaudit.timeout_s,
        )
        out_path = Path(depaudit.output).expanduser()
        write_depaudit(out_path, report)
        if machine_output:
            sweep_reports.append(("dependency_audit", report))
        else:
            console.print(f"[bold]dependency_audit report:[/bold] {out_path}")
            console.print(f"[bold]Repos scanned:[/bold] {report['scope']['repos_scanned']}")
            console.print(f"[bold]Repos with vulns:[/bold] {report['summary']['repos_with_vulns']}")
            console.print(f"[bold]Total vulns:[/bold] {report['summary']['total_vulns']}")
            sev = report["summary"]["severity_counts"]
            console.print(
                f"[bold]Severity:[/bold] critical={sev.get('critical', 0)}"
                f" high={sev.get('high', 0)}"
                f" medium={sev.get('medium', 0)}"
                f" low={sev.get('low', 0)}"
            )
            if errors:
                console.print(f"[yellow]Errors:[/yellow] {len(errors)} (see report)")
            _print_dependency_audit_table(report)
        if report["summary"]["total_vulns"] > 0:
            exit_code = max(exit_code, 2)

    # ssh key audit sweep
    sshk = spec.sweeps.ssh_key_audit
    if sshk.enabled and (not wanted or "ssh_key_audit" in wanted):
        from devguard.sweeps.ssh_key_audit import audit_ssh_keys
        from devguard.sweeps.ssh_key_audit import write_report as write_sshk

        ssh_path = Path(sshk.ssh_dir).expanduser()
        report, errors = audit_ssh_keys(
            ssh_dir=ssh_path,
            check_github=sshk.check_github,
            min_rsa_bits=sshk.min_rsa_bits,
            flag_ecdsa=sshk.flag_ecdsa,
        )
        out_path = Path(sshk.output).expanduser()
        write_sshk(out_path, report)
        if machine_output:
            sweep_reports.append(("ssh_key_audit", report))
        else:
            console.print(f"[bold]ssh_key_audit report:[/bold] {out_path}")
            console.print(f"[bold]Keys scanned:[/bold] {report['summary']['keys_scanned']}")
            console.print(f"[bold]Issues:[/bold] {report['summary']['issues_total']}")
            if errors:
                console.print(f"[yellow]Errors:[/yellow] {len(errors)} (see report)")
            _print_ssh_key_audit_table(report)
        if report["summary"]["issues_total"] > 0:
            exit_code = max(exit_code, 2)

    # cargo publish audit sweep
    cpub = spec.sweeps.cargo_publish_audit
    if cpub.enabled and (not wanted or "cargo_publish_audit" in wanted):
        from devguard.sweeps.cargo_publish_audit import audit_cargo_publish
        from devguard.sweeps.cargo_publish_audit import write_report as write_cpub

        root = Path(cpub.dev_root).expanduser() if cpub.dev_root else default_dev_root()
        report, errors = audit_cargo_publish(
            dev_root=root,
            max_depth=cpub.max_depth,
            exclude_repo_globs=cpub.exclude_repo_globs,
            only_public=cpub.only_public,
            repo_names=cpub.repo_names or None,
        )
        out_path = Path(cpub.output).expanduser()
        write_cpub(out_path, report)
        if machine_output:
            sweep_reports.append(("cargo_publish_audit", report))
        else:
            console.print(f"[bold]cargo_publish_audit report:[/bold] {out_path}")
            console.print(f"[bold]Rust repos found:[/bold] {report['scope']['rust_repos_found']}")
            console.print(
                f"[bold]Repos with errors:[/bold] {report['summary']['repos_with_errors']}"
            )
            console.print(
                f"[bold]Repos with warnings:[/bold] {report['summary']['repos_with_warnings']}"
            )
            console.print(f"[bold]Total findings:[/bold] {report['summary']['total_findings']}")
            if report["summary"]["repos_with_errors_list"]:
                console.print(
                    f"[red]Error repos:[/red]"
                    f" {', '.join(report['summary']['repos_with_errors_list'])}"
                )
            if errors:
                console.print(f"[yellow]Errors:[/yellow] {len(errors)} (see report)")
            _print_cargo_publish_audit_table(report)
        # Only error-severity findings fail the run; warnings alone do not.
        if report["summary"]["total_errors"] > 0:
            exit_code = max(exit_code, 2)

    # ai editor config audit sweep
    aicfg = spec.sweeps.ai_editor_config_audit
    if aicfg.enabled and (not wanted or "ai_editor_config_audit" in wanted):
        from devguard.sweeps.ai_editor_config_audit import audit_ai_editor_configs
        from devguard.sweeps.ai_editor_config_audit import write_report as write_aicfg

        root = Path(aicfg.dev_root).expanduser() if aicfg.dev_root else default_dev_root()
        report, errors = audit_ai_editor_configs(
            dev_root=root,
            max_depth=aicfg.max_depth,
            exclude_repo_globs=aicfg.exclude_repo_globs,
            only_with_configs=aicfg.only_with_configs,
        )
        out_path = Path(aicfg.output).expanduser()
        write_aicfg(out_path, report)
        if machine_output:
            sweep_reports.append(("ai_editor_config_audit", report))
        else:
            console.print(f"[bold]ai_editor_config_audit report:[/bold] {out_path}")
            console.print(
                f"[bold]Repos with AI configs:[/bold] {report['scope']['repos_with_ai_configs']}"
            )
            console.print(
                f"[bold]Repos with errors:[/bold] {report['summary']['repos_with_errors']}"
            )
            console.print(
                f"[bold]Repos with warnings:[/bold] {report['summary']['repos_with_warnings']}"
            )
            console.print(f"[bold]Total findings:[/bold] {report['summary']['total_findings']}")
            # tool_adoption appears to be (tool, count) pairs — TODO confirm
            # against the sweep module.
            if report["summary"].get("tool_adoption"):
                tools = ", ".join(f"{t}={c}" for t, c in report["summary"]["tool_adoption"])
                console.print(f"[bold]Tool adoption:[/bold] {tools}")
            if errors:
                console.print(f"[yellow]Errors:[/yellow] {len(errors)} (see report)")
            _print_ai_editor_config_table(report)
        if report["summary"]["total_errors"] > 0:
            exit_code = max(exit_code, 2)

    # publish audit sweep (PyPI + npm)
    puba = spec.sweeps.publish_audit
    if puba.enabled and (not wanted or "publish_audit" in wanted):
        from devguard.sweeps.publish_audit import audit_publish
        from devguard.sweeps.publish_audit import write_report as write_puba

        root = Path(puba.dev_root).expanduser() if puba.dev_root else default_dev_root()
        report, errors = audit_publish(
            dev_root=root,
            max_depth=puba.max_depth,
            exclude_repo_globs=puba.exclude_repo_globs,
            ecosystems=puba.ecosystems or None,
        )
        out_path = Path(puba.output).expanduser()
        write_puba(out_path, report)
        if machine_output:
            sweep_reports.append(("publish_audit", report))
        else:
            console.print(f"[bold]publish_audit report:[/bold] {out_path}")
            console.print(f"[bold]Repos scanned:[/bold] {report['scope']['repos_scanned']}")
            console.print(
                f"[bold]Repos with errors:[/bold] {report['summary']['repos_with_errors']}"
            )
            console.print(f"[bold]Total findings:[/bold] {report['summary']['total_findings']}")
            if errors:
                console.print(f"[yellow]Errors:[/yellow] {len(errors)} (see report)")
        if report["summary"]["total_errors"] > 0:
            exit_code = max(exit_code, 2)

    # Warn when nothing at all is configured to run (and --only wasn't used).
    any_enabled = (
        local.enabled
        or pub.enabled
        or spec.sweeps.local_dirty_worktree_secrets.enabled
        or spec.sweeps.project_flaudit.enabled
        or spec.sweeps.gitignore_audit.enabled
        or spec.sweeps.dependency_audit.enabled
        or spec.sweeps.ssh_key_audit.enabled
        or spec.sweeps.cargo_publish_audit.enabled
        or spec.sweeps.ai_editor_config_audit.enabled
        or spec.sweeps.publish_audit.enabled
    )
    if not wanted and not any_enabled:
        if not machine_output:
            console.print("[yellow]No sweeps enabled in spec.[/yellow]")

    # Emit machine-readable output to stdout
    if machine_output and sweep_reports:
        if format == "sarif":
            from devguard.sarif import reports_to_sarif

            sarif_doc = reports_to_sarif(sweep_reports)
            sys.stdout.write(json.dumps(sarif_doc, indent=2) + "\n")
        elif format == "json":
            combined = {name: report for name, report in sweep_reports}
            sys.stdout.write(json.dumps(combined, indent=2) + "\n")

    if exit_code > 0:
        raise typer.Exit(code=exit_code)
|
|
1421
|
+
|
|
1422
|
+
|
|
1423
|
+
def _update_env_var(env_content: str, var_name: str, value: str) -> str:
|
|
1424
|
+
"""Update or add an environment variable in .env content."""
|
|
1425
|
+
lines = env_content.split("\n")
|
|
1426
|
+
updated = False
|
|
1427
|
+
new_lines = []
|
|
1428
|
+
|
|
1429
|
+
for line in lines:
|
|
1430
|
+
if line.startswith(f"{var_name}="):
|
|
1431
|
+
new_lines.append(f"{var_name}={value}")
|
|
1432
|
+
updated = True
|
|
1433
|
+
else:
|
|
1434
|
+
new_lines.append(line)
|
|
1435
|
+
|
|
1436
|
+
if not updated:
|
|
1437
|
+
new_lines.append(f"{var_name}={value}")
|
|
1438
|
+
|
|
1439
|
+
return "\n".join(new_lines) + "\n"
|
|
1440
|
+
|
|
1441
|
+
|
|
1442
|
+
@app.command()
def doctor() -> None:
    """Check external tool prerequisites for sweeps."""
    import shutil

    # Each entry: (binary name, sweeps that rely on it, install suggestion).
    prerequisites = [
        (
            "trufflehog",
            "public_github_secrets, local_dirty_worktree_secrets",
            "brew install trufflehog",
        ),
        (
            "cargo-audit",
            "dependency_audit (Rust repos)",
            "cargo install cargo-audit",
        ),
        (
            "npm",
            "dependency_audit (JS repos)",
            "install Node.js from https://nodejs.org",
        ),
        (
            "pip-audit",
            "dependency_audit (Python repos)",
            "pip install pip-audit",
        ),
        (
            "gh",
            "public_github_secrets, ssh_key_audit (GitHub cross-ref)",
            "brew install gh",
        ),
        (
            "git",
            "most sweeps",
            "brew install git",
        ),
    ]

    table = Table(show_header=True, header_style="bold")
    for heading in ("Tool", "Status", "Used by", "Install hint"):
        table.add_column(heading)

    available = 0
    for binary, consumers, install_hint in prerequisites:
        # shutil.which returns None when the binary is not on PATH.
        if shutil.which(binary):
            available += 1
            table.add_row(binary, "[green]found[/green]", consumers, "")
        else:
            table.add_row(binary, "[red]missing[/red]", consumers, install_hint)

    console.print(table)
    console.print()
    console.print(
        f"{available}/{len(prerequisites)} tools found. Missing tools will cause some sweeps to skip or fail."
    )
|
|
1502
|
+
|
|
1503
|
+
|
|
1504
|
+
def main() -> None:
    """Console-script entry point: dispatch to the Typer application."""
    app()
|
|
1507
|
+
|
|
1508
|
+
|
|
1509
|
+
# Support direct execution (e.g. `python cli.py`) in addition to the
# installed console-script entry point.
if __name__ == "__main__":
    main()
|