devguard 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devguard/INTEGRATION_SUMMARY.md +121 -0
- devguard/__init__.py +3 -0
- devguard/__main__.py +6 -0
- devguard/checkers/__init__.py +41 -0
- devguard/checkers/api_usage.py +523 -0
- devguard/checkers/aws_cost.py +331 -0
- devguard/checkers/aws_iam.py +284 -0
- devguard/checkers/base.py +25 -0
- devguard/checkers/container.py +137 -0
- devguard/checkers/domain.py +189 -0
- devguard/checkers/firecrawl.py +117 -0
- devguard/checkers/fly.py +225 -0
- devguard/checkers/github.py +210 -0
- devguard/checkers/npm.py +327 -0
- devguard/checkers/npm_security.py +244 -0
- devguard/checkers/redteam.py +290 -0
- devguard/checkers/secret.py +279 -0
- devguard/checkers/swarm.py +376 -0
- devguard/checkers/tailscale.py +143 -0
- devguard/checkers/tailsnitch.py +303 -0
- devguard/checkers/tavily.py +179 -0
- devguard/checkers/vercel.py +192 -0
- devguard/cli.py +1510 -0
- devguard/cli_helpers.py +189 -0
- devguard/config.py +249 -0
- devguard/core.py +293 -0
- devguard/dashboard.py +715 -0
- devguard/discovery.py +363 -0
- devguard/http_client.py +142 -0
- devguard/llm_service.py +481 -0
- devguard/mcp_server.py +259 -0
- devguard/metrics.py +144 -0
- devguard/models.py +208 -0
- devguard/reporting.py +1571 -0
- devguard/sarif.py +295 -0
- devguard/scripts/ANALYSIS_SUMMARY.md +141 -0
- devguard/scripts/README.md +221 -0
- devguard/scripts/auto_fix_recommendations.py +145 -0
- devguard/scripts/generate_npmignore.py +175 -0
- devguard/scripts/generate_security_report.py +324 -0
- devguard/scripts/prepublish_check.sh +29 -0
- devguard/scripts/redteam_npm_packages.py +1262 -0
- devguard/scripts/review_all_repos.py +300 -0
- devguard/spec.py +617 -0
- devguard/sweeps/__init__.py +23 -0
- devguard/sweeps/ai_editor_config_audit.py +697 -0
- devguard/sweeps/cargo_publish_audit.py +655 -0
- devguard/sweeps/dependency_audit.py +419 -0
- devguard/sweeps/gitignore_audit.py +336 -0
- devguard/sweeps/local_dev.py +260 -0
- devguard/sweeps/local_dirty_worktree_secrets.py +521 -0
- devguard/sweeps/project_flaudit.py +636 -0
- devguard/sweeps/public_github_secrets.py +680 -0
- devguard/sweeps/publish_audit.py +478 -0
- devguard/sweeps/ssh_key_audit.py +327 -0
- devguard/utils.py +174 -0
- devguard-0.2.0.dist-info/METADATA +225 -0
- devguard-0.2.0.dist-info/RECORD +60 -0
- devguard-0.2.0.dist-info/WHEEL +4 -0
- devguard-0.2.0.dist-info/entry_points.txt +2 -0
|
@@ -0,0 +1,145 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Generate automated fix recommendations based on security analysis."""
|
|
3
|
+
|
|
4
|
+
import json
|
|
5
|
+
import subprocess
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def generate_fix_commands(
    report_path: Path = Path("npm_security_report.json"),
) -> list[dict[str, Any]]:
    """Derive actionable fix commands from a JSON security report.

    Parameters
    ----------
    report_path:
        Location of the report produced by
        ``devguard/scripts/generate_security_report.py``.

    Returns
    -------
    list[dict[str, Any]]
        One entry per recommended fix with ``priority``, ``issue`` and
        ``description`` keys, plus ``command``/``note``/``manual`` where
        applicable.  Empty when the report is missing or contains no
        actionable findings.
    """
    if not report_path.exists():
        print(f"Error: Report not found: {report_path}")
        print("Run: uv run python devguard/scripts/generate_security_report.py")
        return []

    with open(report_path) as f:
        report = json.load(f)

    # Tolerate partially populated reports: the original indexed
    # report["summary"]["total_findings"] directly and raised KeyError
    # on a malformed file.
    totals = report.get("summary", {}).get("total_findings", {})

    fixes: list[dict[str, Any]] = []

    # Fix missing .npmignore
    missing_npmignore = totals.get("missing_npmignore", 0)
    if missing_npmignore > 0:
        fixes.append(
            {
                "priority": "high",
                "issue": "Missing .npmignore files",
                "command": "uv run python devguard/scripts/generate_npmignore.py",
                "description": f"Generate .npmignore files for {missing_npmignore} package(s)",
            }
        )

    # Fix dependency vulnerabilities
    dep_vulns = totals.get("dependency_vulnerabilities", 0)
    if dep_vulns > 0:
        fixes.append(
            {
                "priority": "high",
                "issue": "Dependency vulnerabilities",
                "command": "npm audit fix",
                "description": f"Fix {dep_vulns} dependency vulnerabilities",
                "note": "Review changes before committing",
            }
        )

    # Review obfuscated code — flagged as manual because the grep is
    # only a starting point for a human reviewer.
    obfuscated = totals.get("obfuscated_code", 0)
    if obfuscated > 0:
        fixes.append(
            {
                "priority": "medium",
                "issue": "Obfuscated code patterns",
                "command": "grep -r 'Function\\|atob\\|eval' --include='*.js' --include='*.mjs' --include='*.ts'",
                "description": f"Review {obfuscated} obfuscated code patterns",
                "manual": True,
            }
        )

    return fixes
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def main():
    """Main entry point.

    Prints the fix recommendations grouped by priority, then offers to
    run the non-manual high-priority commands via ``subprocess``.
    """
    fixes = generate_fix_commands()

    # No report on disk, or nothing actionable in it.
    if not fixes:
        print("No fixes needed or report not found.")
        return

    print("=" * 80)
    print("AUTOMATED FIX RECOMMENDATIONS")
    print("=" * 80)
    print()

    # Bucket fixes by priority for grouped display below.
    high_priority = [f for f in fixes if f["priority"] == "high"]
    medium_priority = [f for f in fixes if f["priority"] == "medium"]
    low_priority = [f for f in fixes if f["priority"] == "low"]

    if high_priority:
        print("š“ HIGH PRIORITY FIXES:")
        print()
        for fix in high_priority:
            print(f" Issue: {fix['issue']}")
            print(f" Description: {fix['description']}")
            print(f" Command: {fix['command']}")
            if "note" in fix:
                print(f" Note: {fix['note']}")
            if fix.get("manual"):
                print(" ā ļø Manual review required")
            print()

    if medium_priority:
        print("š” MEDIUM PRIORITY FIXES:")
        print()
        for fix in medium_priority:
            print(f" Issue: {fix['issue']}")
            print(f" Description: {fix['description']}")
            # Unlike high-priority entries, a command is optional here.
            if "command" in fix:
                print(f" Command: {fix['command']}")
            if fix.get("manual"):
                print(" ā ļø Manual review required")
            print()

    if low_priority:
        print("š¢ LOW PRIORITY FIXES:")
        print()
        for fix in low_priority:
            print(f" Issue: {fix['issue']}")
            print(f" Description: {fix['description']}")
            print()

    # Ask if user wants to apply fixes
    print("=" * 80)
    print("Apply fixes automatically? (y/n): ", end="")
    try:
        response = input().strip().lower()
        if response == "y":
            # Only high-priority, non-manual fixes are ever executed
            # automatically; manual ones require human review.
            for fix in high_priority:
                if not fix.get("manual"):
                    print(f"\nRunning: {fix['command']}")
                    try:
                        # check=False: failures are reported below, not raised.
                        result = subprocess.run(
                            fix["command"].split(),
                            capture_output=True,
                            text=True,
                            check=False,
                        )
                        if result.returncode == 0:
                            print(f"ā Success: {fix['issue']}")
                        else:
                            print(f"ā Failed: {fix['issue']}")
                            print(result.stderr)
                    except Exception as e:
                        # e.g. command binary not found on PATH.
                        print(f"ā Error: {e}")
        else:
            print("Skipping automatic fixes. Run commands manually as needed.")
    except (EOFError, KeyboardInterrupt):
        # Non-interactive invocation (piped stdin) or Ctrl-C at the prompt.
        print("\nSkipping automatic fixes.")


if __name__ == "__main__":
    main()
|
@@ -0,0 +1,175 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Generate .npmignore files for npm packages based on best practices."""
|
|
3
|
+
|
|
4
|
+
import json
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Any
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def generate_npmignore_content(
|
|
10
|
+
package_dir: Path, package_json: dict[str, Any] | None = None
|
|
11
|
+
) -> str:
|
|
12
|
+
"""Generate .npmignore content based on package structure and best practices."""
|
|
13
|
+
lines = [
|
|
14
|
+
"# .npmignore - Files excluded from npm package",
|
|
15
|
+
"# Generated based on npm best practices",
|
|
16
|
+
"",
|
|
17
|
+
"# Test files",
|
|
18
|
+
"test/",
|
|
19
|
+
"tests/",
|
|
20
|
+
"__tests__/",
|
|
21
|
+
"*.test.js",
|
|
22
|
+
"*.test.ts",
|
|
23
|
+
"*.test.mjs",
|
|
24
|
+
"*.spec.js",
|
|
25
|
+
"*.spec.ts",
|
|
26
|
+
"*.spec.mjs",
|
|
27
|
+
"",
|
|
28
|
+
"# Coverage reports",
|
|
29
|
+
"coverage/",
|
|
30
|
+
".nyc_output/",
|
|
31
|
+
"*.lcov",
|
|
32
|
+
"",
|
|
33
|
+
"# Development configuration",
|
|
34
|
+
".eslintrc*",
|
|
35
|
+
".prettierrc*",
|
|
36
|
+
".editorconfig",
|
|
37
|
+
".mocharc*",
|
|
38
|
+
"jest.config.*",
|
|
39
|
+
"vitest.config.*",
|
|
40
|
+
"",
|
|
41
|
+
"# CI/CD configuration (may contain secrets)",
|
|
42
|
+
".github/",
|
|
43
|
+
".gitlab-ci.yml",
|
|
44
|
+
".circleci/",
|
|
45
|
+
".travis.yml",
|
|
46
|
+
".drone.yml",
|
|
47
|
+
"azure-pipelines.yml",
|
|
48
|
+
"Jenkinsfile",
|
|
49
|
+
"",
|
|
50
|
+
"# Environment and secrets",
|
|
51
|
+
".env",
|
|
52
|
+
".env.*",
|
|
53
|
+
"*.env",
|
|
54
|
+
".secrets",
|
|
55
|
+
"secrets.json",
|
|
56
|
+
"credentials.json",
|
|
57
|
+
"config.json",
|
|
58
|
+
"",
|
|
59
|
+
"# Lock files",
|
|
60
|
+
"package-lock.json",
|
|
61
|
+
"yarn.lock",
|
|
62
|
+
"pnpm-lock.yaml",
|
|
63
|
+
"",
|
|
64
|
+
"# Git",
|
|
65
|
+
".git/",
|
|
66
|
+
".gitignore",
|
|
67
|
+
".gitattributes",
|
|
68
|
+
"",
|
|
69
|
+
"# IDE and editor files",
|
|
70
|
+
".vscode/",
|
|
71
|
+
".idea/",
|
|
72
|
+
"*.swp",
|
|
73
|
+
"*.swo",
|
|
74
|
+
"*~",
|
|
75
|
+
".DS_Store",
|
|
76
|
+
"",
|
|
77
|
+
"# Build artifacts (if source is published)",
|
|
78
|
+
"dist/",
|
|
79
|
+
"build/",
|
|
80
|
+
"*.map",
|
|
81
|
+
"",
|
|
82
|
+
"# Documentation (optional - remove if you want docs in package)",
|
|
83
|
+
"# docs/",
|
|
84
|
+
"# *.md",
|
|
85
|
+
"# README.md is always included by npm",
|
|
86
|
+
"",
|
|
87
|
+
"# Temporary files",
|
|
88
|
+
"*.tmp",
|
|
89
|
+
"*.log",
|
|
90
|
+
"*.cache",
|
|
91
|
+
"",
|
|
92
|
+
"# Source files (if publishing compiled code only)",
|
|
93
|
+
"# Uncomment if you only publish compiled output:",
|
|
94
|
+
"# src/",
|
|
95
|
+
"# *.ts",
|
|
96
|
+
"# tsconfig.json",
|
|
97
|
+
]
|
|
98
|
+
|
|
99
|
+
return "\n".join(lines) + "\n"
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def find_package_directories(base_path: Path) -> list[Path]:
    """Return every directory under *base_path* holding a ``package.json``.

    Manifests anywhere inside a ``node_modules`` tree are excluded.
    """
    return [
        manifest.parent
        for manifest in base_path.rglob("package.json")
        if "node_modules" not in manifest.parts
    ]
|
111
|
+
|
|
112
|
+
|
|
113
|
+
def main():
    """Generate (or refresh) a ``.npmignore`` for every package found.

    Usage: ``generate_npmignore.py [BASE_PATH]``.  Without an argument
    the scan defaults to ``~/Documents/dev``.  An existing ``.npmignore``
    that differs from the template is backed up to ``.npmignore.backup``
    before being overwritten.
    """
    import sys

    if len(sys.argv) > 1:
        base_path = Path(sys.argv[1])
    else:
        # Default to common dev locations
        base_path = Path.home() / "Documents" / "dev"

    if not base_path.exists():
        print(f"Error: Path does not exist: {base_path}")
        return

    packages = find_package_directories(base_path)

    if not packages:
        print(f"No package.json files found in {base_path}")
        return

    print(f"Found {len(packages)} packages")
    print()

    for pkg_dir in packages:
        npmignore_path = pkg_dir / ".npmignore"

        # Load package.json if present — best effort: a broken manifest
        # should not stop .npmignore generation for this package.
        pkg_json_path = pkg_dir / "package.json"
        package_json = None
        if pkg_json_path.exists():
            try:
                with open(pkg_json_path) as f:
                    package_json = json.load(f)
            except (OSError, json.JSONDecodeError):
                pass

        # Generate .npmignore
        content = generate_npmignore_content(pkg_dir, package_json)

        # Check if it already exists
        if npmignore_path.exists():
            existing = npmignore_path.read_text()
            if existing.strip() == content.strip():
                print(f"ā {pkg_dir.name}: .npmignore already up to date")
                continue
            print(f"ā {pkg_dir.name}: .npmignore exists but differs")
            # BUG FIX: with_suffix() on the dot-file ".npmignore" produced
            # ".npmignore.npmignore.backup"; with_name() yields the intended
            # ".npmignore.backup".  replace() (vs. rename()) also overwrites
            # a stale backup instead of failing on Windows.  The dangling
            # "Backup existing file? (y/n)" prompt that never read input
            # has been removed — the backup is always taken.
            backup_path = npmignore_path.with_name(".npmignore.backup")
            npmignore_path.replace(backup_path)
            print(f" Backed up to {backup_path.name}")

        # Write new .npmignore
        npmignore_path.write_text(content)
        print(f"ā {pkg_dir.name}: Created/updated .npmignore")

    print()
    print("Done! Review the generated .npmignore files and customize as needed.")


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,324 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Generate a comprehensive security report from red team analysis."""
|
|
3
|
+
|
|
4
|
+
import asyncio
|
|
5
|
+
import json
|
|
6
|
+
|
|
7
|
+
# Import the red team analysis
|
|
8
|
+
import sys
|
|
9
|
+
from datetime import UTC, datetime
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
|
|
12
|
+
sys.path.insert(0, str(Path(__file__).parent.parent))
|
|
13
|
+
|
|
14
|
+
from devguard.scripts.redteam_npm_packages import analyze_package
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
async def generate_report(packages: list[str], versions: dict[str, str] | None = None) -> dict:
    """Generate comprehensive security report.

    Runs ``analyze_package`` for each entry in *packages* (optionally
    pinned to a version via *versions*), then aggregates the per-package
    findings into summary counts, critical issues, warnings and
    recommendations.

    Returns a dict with keys ``generated_at``, ``packages_analyzed``,
    ``summary``, ``critical_issues``, ``warnings``, ``recommendations``
    and ``detailed_results``.
    """
    results = []

    for package in packages:
        try:
            version = versions.get(package) if versions else None
            result = await analyze_package(package, version)
            results.append(result)
        except Exception as e:
            # Record the failure instead of aborting, so one bad package
            # does not sink the whole report.
            results.append(
                {
                    "package": package,
                    "error": str(e),
                }
            )

    # Calculate summary statistics
    total_findings = {
        "secrets": 0,
        "sensitive_files": 0,
        "obfuscated_code": 0,
        "git_history": 0,
        "lock_files": 0,
        "ci_configs": 0,
        "missing_npmignore": 0,
        "suspicious_scripts": 0,
        "placeholder_values": 0,
        "dependency_vulnerabilities": 0,
        "postinstall_scripts": 0,
        "file_permissions": 0,
        "suspicious_package_names": 0,
    }

    critical_issues = []
    warnings = []
    recommendations = []

    for result in results:
        # Skip packages whose analysis failed or returned no findings dict.
        if "error" in result or "findings" not in result:
            continue

        findings = result["findings"]

        # Count findings.  List-valued categories contribute their
        # lengths; boolean flags (git_history, npmignore_missing)
        # contribute one per affected package.
        total_findings["secrets"] += len(findings.get("secrets", []))
        total_findings["sensitive_files"] += len(findings.get("sensitive_files", []))
        total_findings["obfuscated_code"] += len(findings.get("obfuscated_code", []))
        if findings.get("git_history"):
            total_findings["git_history"] += 1
        total_findings["lock_files"] += len(findings.get("lock_files", []))
        total_findings["ci_configs"] += len(findings.get("ci_configs", []))
        if findings.get("npmignore_missing"):
            total_findings["missing_npmignore"] += 1

        pkg_issues = findings.get("package_json_issues", {})
        total_findings["suspicious_scripts"] += len(pkg_issues.get("suspicious_scripts", []))
        total_findings["placeholder_values"] += len(pkg_issues.get("placeholder_values", []))
        total_findings["dependency_vulnerabilities"] += len(
            findings.get("dependency_vulnerabilities", [])
        )
        total_findings["postinstall_scripts"] += len(findings.get("postinstall_scripts", []))
        total_findings["file_permissions"] += len(findings.get("file_permissions", []))
        total_findings["suspicious_package_names"] += len(
            findings.get("suspicious_package_names", [])
        )

        # Categorize issues: secrets and shipped .git history are treated
        # as critical; missing .npmignore and obfuscation as warnings.
        if findings.get("secrets"):
            critical_issues.append(
                {
                    "package": result["package"],
                    "type": "secrets",
                    "count": len(findings["secrets"]),
                }
            )

        if findings.get("git_history"):
            critical_issues.append(
                {
                    "package": result["package"],
                    "type": "git_history",
                    "message": ".git directory found in published package",
                }
            )

        if findings.get("npmignore_missing"):
            warnings.append(
                {
                    "package": result["package"],
                    "type": "missing_npmignore",
                    "message": "No .npmignore file found",
                }
            )

        if findings.get("obfuscated_code"):
            warnings.append(
                {
                    "package": result["package"],
                    "type": "obfuscated_code",
                    "count": len(findings["obfuscated_code"]),
                }
            )

    # Generate recommendations
    if total_findings["missing_npmignore"] > 0:
        recommendations.append(
            {
                "priority": "high",
                "action": "Add .npmignore files",
                "packages_affected": total_findings["missing_npmignore"],
                "command": "uv run python devguard/scripts/generate_npmignore.py",
            }
        )

    if total_findings["obfuscated_code"] > 0:
        recommendations.append(
            {
                "priority": "medium",
                "action": "Review obfuscated code patterns",
                "count": total_findings["obfuscated_code"],
                "note": "Ensure Function(), atob(), etc. are used legitimately",
            }
        )

    if total_findings["lock_files"] > 0:
        recommendations.append(
            {
                "priority": "low",
                "action": "Remove lock files from published packages",
                "count": total_findings["lock_files"],
            }
        )

    if total_findings["dependency_vulnerabilities"] > 0:
        # Count only CRITICAL/HIGH severity vulnerabilities; their
        # presence escalates the recommendation priority.
        critical_dep_vulns = sum(
            1
            for r in results
            if "findings" in r
            for v in r["findings"].get("dependency_vulnerabilities", [])
            if v.get("severity") in ["CRITICAL", "HIGH"]
        )
        priority = "high" if critical_dep_vulns > 0 else "medium"
        recommendations.append(
            {
                "priority": priority,
                "action": "Fix dependency vulnerabilities",
                "count": total_findings["dependency_vulnerabilities"],
                "critical_count": critical_dep_vulns,
                "command": "npm audit fix",
                "note": "Review changes before committing"
                if critical_dep_vulns > 0
                else "Update vulnerable dependencies",
            }
        )

    if total_findings["postinstall_scripts"] > 0:
        recommendations.append(
            {
                "priority": "medium",
                "action": "Review install scripts for security risks",
                "count": total_findings["postinstall_scripts"],
                "note": "Install scripts can be supply chain attack vectors",
            }
        )

    report = {
        "generated_at": datetime.now(UTC).isoformat(),
        "packages_analyzed": len(packages),
        "summary": {
            "critical_issues": len(critical_issues),
            "warnings": len(warnings),
            "total_findings": total_findings,
        },
        "critical_issues": critical_issues,
        "warnings": warnings,
        "recommendations": recommendations,
        "detailed_results": results,
    }

    return report
|
198
|
+
|
|
199
|
+
|
|
200
|
+
def generate_markdown_report(report: dict) -> str:
    """Render the JSON security report as a Markdown document.

    Parameters
    ----------
    report:
        Report dict produced by ``generate_report`` (must contain
        ``generated_at``, ``packages_analyzed``, ``summary``,
        ``critical_issues``, ``warnings`` and ``recommendations``).

    Returns
    -------
    str
        The Markdown text (sections separated by blank lines).
    """
    lines = [
        "# NPM Package Security Report",
        "",
        f"**Generated:** {report['generated_at']}",
        f"**Packages Analyzed:** {report['packages_analyzed']}",
        "",
        "## Summary",
        "",
        f"- **Critical Issues:** {report['summary']['critical_issues']}",
        f"- **Warnings:** {report['summary']['warnings']}",
        "",
        "### Findings",
        "",
    ]

    # Only list categories that actually had hits.
    for key, value in report["summary"]["total_findings"].items():
        if value > 0:
            lines.append(f"- **{key.replace('_', ' ').title()}:** {value}")

    if report["critical_issues"]:
        lines.extend(["", "## Critical Issues", ""])
        for issue in report["critical_issues"]:
            lines.append(f"### {issue['package']}")
            lines.append(f"- **Type:** {issue['type']}")
            if "count" in issue:
                lines.append(f"- **Count:** {issue['count']}")
            if "message" in issue:
                lines.append(f"- **Message:** {issue['message']}")
            lines.append("")

    if report["warnings"]:
        # FIX: the heading now always gets a preceding blank line, so the
        # Markdown stays well-formed even when no critical-issues section
        # was emitted above.
        lines.extend(["", "## Warnings", ""])
        for warning in report["warnings"]:
            lines.append(f"- **{warning['package']}:** {warning.get('message', warning['type'])}")
        lines.append("")

    if report["recommendations"]:
        lines.extend(["", "## Recommendations", ""])
        for rec in report["recommendations"]:
            priority_emoji = {"high": "š“", "medium": "š”", "low": "š¢"}.get(rec["priority"], "ā¢")
            lines.append(f"{priority_emoji} **[{rec['priority'].upper()}]** {rec['action']}")
            if "packages_affected" in rec:
                lines.append(f"  - Affects {rec['packages_affected']} package(s)")
            if "count" in rec:
                lines.append(f"  - Count: {rec['count']}")
            if "command" in rec:
                lines.append(f"  - Command: `{rec['command']}`")
            if "note" in rec:
                lines.append(f"  - Note: {rec['note']}")
            lines.append("")

    return "\n".join(lines)
|
270
|
+
|
|
271
|
+
|
|
272
|
+
async def main():
    """Main entry point.

    Runs the analysis over the hard-coded package list, writes JSON and
    Markdown reports to the current directory, then prints a summary.
    """

    # Replace with your own packages to audit
    packages = [
        "example-package",
    ]

    # Optional package -> version pins passed through to generate_report.
    versions = {
        "example-package": "1.0.0",
    }

    print("Generating comprehensive security report...")
    report = await generate_report(packages, versions)

    # Save JSON report
    json_path = Path("npm_security_report.json")
    with open(json_path, "w") as f:
        json.dump(report, f, indent=2)

    print(f"\nā JSON report saved to {json_path}")

    # Generate and save markdown report
    markdown = generate_markdown_report(report)
    md_path = Path("npm_security_report.md")
    md_path.write_text(markdown)

    print(f"ā Markdown report saved to {md_path}")

    # Print summary
    print("\n" + "=" * 80)
    print("SECURITY REPORT SUMMARY")
    print("=" * 80)
    print(f"Packages analyzed: {report['packages_analyzed']}")
    print(f"Critical issues: {report['summary']['critical_issues']}")
    print(f"Warnings: {report['summary']['warnings']}")
    print("\nFindings:")
    # Only non-zero categories are shown, mirroring the Markdown report.
    for key, value in report["summary"]["total_findings"].items():
        if value > 0:
            print(f" ⢠{key}: {value}")

    if report["recommendations"]:
        print("\nRecommendations:")
        for rec in report["recommendations"]:
            print(f" [{rec['priority'].upper()}] {rec['action']}")
            if "packages_affected" in rec:
                print(f" Affects {rec['packages_affected']} package(s)")
            if "command" in rec:
                print(f" Run: {rec['command']}")


if __name__ == "__main__":
    asyncio.run(main())
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
#!/bin/bash
# Pre-publish security check hook for npm packages
# Add to package.json: "prepublishOnly": "./devguard/scripts/prepublish_check.sh"

# Abort the publish on the first failing command.
set -e

echo "š Running pre-publish security checks..."

# Check if devguard scripts are available.
# Without python3 we exit 0 (skip, don't block publishing).
if ! command -v python3 &> /dev/null; then
    echo "ā ļø Python3 not found, skipping security checks"
    exit 0
fi

# Run red team analysis; a non-zero exit from the analyzer blocks the
# publish with exit 1.
if [ -f "devguard/scripts/redteam_npm_packages.py" ]; then
    echo "Running security analysis..."
    python3 devguard/scripts/redteam_npm_packages.py || {
        echo "ā Security checks failed!"
        echo "Review the findings above before publishing."
        exit 1
    }
else
    # Script missing is also a soft skip, not a failure.
    echo "ā ļø Security scripts not found, skipping checks"
fi

# NOTE(review): this string contains an embedded line break from a
# mis-encoded emoji in the published source — confirm the intended text.
echo "ā
Pre-publish checks passed!"
|
|
28
|
+
|
|
29
|
+
|