tribunal-kit 1.0.0 → 2.4.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.agent/.shared/ui-ux-pro-max/README.md +3 -3
- package/.agent/ARCHITECTURE.md +205 -10
- package/.agent/GEMINI.md +37 -7
- package/.agent/agents/accessibility-reviewer.md +134 -0
- package/.agent/agents/ai-code-reviewer.md +129 -0
- package/.agent/agents/frontend-specialist.md +3 -0
- package/.agent/agents/game-developer.md +21 -21
- package/.agent/agents/logic-reviewer.md +12 -0
- package/.agent/agents/mobile-reviewer.md +79 -0
- package/.agent/agents/orchestrator.md +56 -26
- package/.agent/agents/performance-reviewer.md +36 -0
- package/.agent/agents/supervisor-agent.md +156 -0
- package/.agent/agents/swarm-worker-contracts.md +166 -0
- package/.agent/agents/swarm-worker-registry.md +92 -0
- package/.agent/rules/GEMINI.md +134 -5
- package/.agent/scripts/bundle_analyzer.py +259 -0
- package/.agent/scripts/dependency_analyzer.py +247 -0
- package/.agent/scripts/lint_runner.py +188 -0
- package/.agent/scripts/patch_skills_meta.py +177 -0
- package/.agent/scripts/patch_skills_output.py +285 -0
- package/.agent/scripts/schema_validator.py +279 -0
- package/.agent/scripts/security_scan.py +224 -0
- package/.agent/scripts/session_manager.py +144 -3
- package/.agent/scripts/skill_integrator.py +234 -0
- package/.agent/scripts/strengthen_skills.py +220 -0
- package/.agent/scripts/swarm_dispatcher.py +317 -0
- package/.agent/scripts/test_runner.py +192 -0
- package/.agent/scripts/test_swarm_dispatcher.py +163 -0
- package/.agent/skills/agent-organizer/SKILL.md +132 -0
- package/.agent/skills/agentic-patterns/SKILL.md +335 -0
- package/.agent/skills/api-patterns/SKILL.md +226 -50
- package/.agent/skills/app-builder/SKILL.md +215 -52
- package/.agent/skills/architecture/SKILL.md +176 -31
- package/.agent/skills/bash-linux/SKILL.md +150 -134
- package/.agent/skills/behavioral-modes/SKILL.md +152 -160
- package/.agent/skills/brainstorming/SKILL.md +148 -101
- package/.agent/skills/brainstorming/dynamic-questioning.md +10 -0
- package/.agent/skills/clean-code/SKILL.md +139 -134
- package/.agent/skills/code-review-checklist/SKILL.md +177 -80
- package/.agent/skills/config-validator/SKILL.md +165 -0
- package/.agent/skills/csharp-developer/SKILL.md +107 -0
- package/.agent/skills/database-design/SKILL.md +252 -29
- package/.agent/skills/deployment-procedures/SKILL.md +122 -175
- package/.agent/skills/devops-engineer/SKILL.md +134 -0
- package/.agent/skills/devops-incident-responder/SKILL.md +98 -0
- package/.agent/skills/documentation-templates/SKILL.md +175 -121
- package/.agent/skills/dotnet-core-expert/SKILL.md +103 -0
- package/.agent/skills/edge-computing/SKILL.md +213 -0
- package/.agent/skills/frontend-design/SKILL.md +76 -0
- package/.agent/skills/frontend-design/color-system.md +18 -0
- package/.agent/skills/frontend-design/typography-system.md +18 -0
- package/.agent/skills/game-development/SKILL.md +69 -0
- package/.agent/skills/geo-fundamentals/SKILL.md +158 -99
- package/.agent/skills/github-operations/SKILL.md +354 -0
- package/.agent/skills/i18n-localization/SKILL.md +158 -96
- package/.agent/skills/intelligent-routing/SKILL.md +89 -285
- package/.agent/skills/intelligent-routing/router-manifest.md +65 -0
- package/.agent/skills/lint-and-validate/SKILL.md +229 -27
- package/.agent/skills/llm-engineering/SKILL.md +258 -0
- package/.agent/skills/local-first/SKILL.md +203 -0
- package/.agent/skills/mcp-builder/SKILL.md +159 -111
- package/.agent/skills/mobile-design/SKILL.md +102 -282
- package/.agent/skills/nextjs-react-expert/SKILL.md +143 -227
- package/.agent/skills/nodejs-best-practices/SKILL.md +201 -254
- package/.agent/skills/observability/SKILL.md +285 -0
- package/.agent/skills/parallel-agents/SKILL.md +124 -118
- package/.agent/skills/performance-profiling/SKILL.md +143 -89
- package/.agent/skills/plan-writing/SKILL.md +133 -97
- package/.agent/skills/platform-engineer/SKILL.md +135 -0
- package/.agent/skills/powershell-windows/SKILL.md +167 -104
- package/.agent/skills/python-patterns/SKILL.md +149 -361
- package/.agent/skills/python-pro/SKILL.md +114 -0
- package/.agent/skills/react-specialist/SKILL.md +107 -0
- package/.agent/skills/readme-builder/SKILL.md +270 -0
- package/.agent/skills/realtime-patterns/SKILL.md +296 -0
- package/.agent/skills/red-team-tactics/SKILL.md +136 -134
- package/.agent/skills/rust-pro/SKILL.md +237 -173
- package/.agent/skills/seo-fundamentals/SKILL.md +134 -82
- package/.agent/skills/server-management/SKILL.md +155 -104
- package/.agent/skills/sql-pro/SKILL.md +104 -0
- package/.agent/skills/systematic-debugging/SKILL.md +156 -79
- package/.agent/skills/tailwind-patterns/SKILL.md +163 -205
- package/.agent/skills/tdd-workflow/SKILL.md +148 -88
- package/.agent/skills/test-result-analyzer/SKILL.md +299 -0
- package/.agent/skills/testing-patterns/SKILL.md +141 -114
- package/.agent/skills/trend-researcher/SKILL.md +228 -0
- package/.agent/skills/ui-ux-pro-max/SKILL.md +107 -0
- package/.agent/skills/ui-ux-researcher/SKILL.md +234 -0
- package/.agent/skills/vue-expert/SKILL.md +118 -0
- package/.agent/skills/vulnerability-scanner/SKILL.md +228 -188
- package/.agent/skills/web-design-guidelines/SKILL.md +148 -33
- package/.agent/skills/webapp-testing/SKILL.md +171 -122
- package/.agent/skills/whimsy-injector/SKILL.md +349 -0
- package/.agent/skills/workflow-optimizer/SKILL.md +219 -0
- package/.agent/workflows/api-tester.md +279 -0
- package/.agent/workflows/audit.md +168 -0
- package/.agent/workflows/brainstorm.md +65 -19
- package/.agent/workflows/changelog.md +144 -0
- package/.agent/workflows/create.md +67 -14
- package/.agent/workflows/debug.md +122 -30
- package/.agent/workflows/deploy.md +82 -31
- package/.agent/workflows/enhance.md +59 -27
- package/.agent/workflows/fix.md +143 -0
- package/.agent/workflows/generate.md +84 -20
- package/.agent/workflows/migrate.md +163 -0
- package/.agent/workflows/orchestrate.md +66 -17
- package/.agent/workflows/performance-benchmarker.md +305 -0
- package/.agent/workflows/plan.md +76 -33
- package/.agent/workflows/preview.md +73 -17
- package/.agent/workflows/refactor.md +153 -0
- package/.agent/workflows/review-ai.md +140 -0
- package/.agent/workflows/review.md +83 -16
- package/.agent/workflows/session.md +154 -0
- package/.agent/workflows/status.md +74 -18
- package/.agent/workflows/strengthen-skills.md +99 -0
- package/.agent/workflows/swarm.md +194 -0
- package/.agent/workflows/test.md +80 -31
- package/.agent/workflows/tribunal-backend.md +55 -13
- package/.agent/workflows/tribunal-database.md +62 -18
- package/.agent/workflows/tribunal-frontend.md +58 -12
- package/.agent/workflows/tribunal-full.md +70 -11
- package/.agent/workflows/tribunal-mobile.md +123 -0
- package/.agent/workflows/tribunal-performance.md +152 -0
- package/.agent/workflows/ui-ux-pro-max.md +100 -82
- package/README.md +117 -62
- package/bin/tribunal-kit.js +542 -288
- package/package.json +10 -6
|
@@ -0,0 +1,279 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
schema_validator.py — Database schema validator for the Tribunal Agent Kit.
|
|
4
|
+
|
|
5
|
+
Detects ORM/schema type and validates for common issues:
|
|
6
|
+
- Missing indexes on foreign keys
|
|
7
|
+
- Unnamed constraints
|
|
8
|
+
- Inconsistent naming conventions
|
|
9
|
+
- Missing updated_at / created_at timestamps
|
|
10
|
+
- Prisma / Drizzle / raw SQL support
|
|
11
|
+
|
|
12
|
+
Usage:
|
|
13
|
+
python .agent/scripts/schema_validator.py .
|
|
14
|
+
python .agent/scripts/schema_validator.py . --type prisma
|
|
15
|
+
python .agent/scripts/schema_validator.py . --file prisma/schema.prisma
|
|
16
|
+
"""
|
|
17
|
+
|
|
18
|
+
import os
|
|
19
|
+
import sys
|
|
20
|
+
import re
|
|
21
|
+
import argparse
|
|
22
|
+
from pathlib import Path
|
|
23
|
+
|
|
24
|
+
# ANSI escape sequences for colorized terminal output.
RED = "\033[91m"
GREEN = "\033[92m"
YELLOW = "\033[93m"
BLUE = "\033[94m"
BOLD = "\033[1m"
RESET = "\033[0m"  # restores default terminal style
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def header(title: str) -> None:
    """Print a bold blue section divider containing *title*."""
    banner = f"\n{BOLD}{BLUE}━━━ {title} ━━━{RESET}"
    print(banner)
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def ok(msg: str) -> None:
    """Print *msg* as a green, check-marked success line."""
    text = f" {GREEN}✅ {msg}{RESET}"
    print(text)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def fail(msg: str) -> None:
    """Print *msg* as a red, cross-marked failure line."""
    text = f" {RED}❌ {msg}{RESET}"
    print(text)
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def warn(msg: str) -> None:
    """Print *msg* as a yellow warning line."""
    text = f" {YELLOW}⚠️ {msg}{RESET}"
    print(text)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def skip(msg: str) -> None:
    """Print *msg* as a yellow skipped-step line."""
    text = f" {YELLOW}⏭️ {msg}{RESET}"
    print(text)
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def detect_orm(project_root: str) -> str | None:
|
|
53
|
+
"""Detect the ORM/schema type from project files."""
|
|
54
|
+
root = Path(project_root)
|
|
55
|
+
|
|
56
|
+
if (root / "prisma" / "schema.prisma").exists():
|
|
57
|
+
return "prisma"
|
|
58
|
+
if list(root.glob("**/drizzle.config.*")):
|
|
59
|
+
return "drizzle"
|
|
60
|
+
if list(root.glob("**/migrations/*.sql")):
|
|
61
|
+
return "sql"
|
|
62
|
+
if (root / "knexfile.js").exists() or (root / "knexfile.ts").exists():
|
|
63
|
+
return "knex"
|
|
64
|
+
|
|
65
|
+
return None
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def validate_prisma(filepath: str) -> list[tuple[str, str, int]]:
    """Validate a Prisma schema file.

    Flags, per model: missing createdAt/updatedAt timestamps, no ``@id``
    field, foreign-key-looking fields (names ending in ``Id``) that are
    not covered by an ``@@index``, and model names that are not
    PascalCase.

    Args:
        filepath: Path to a ``.prisma`` schema file.

    Returns:
        A list of ``(severity, message, line_number)`` tuples where
        severity is ``"warn"`` or ``"error"``; line numbers are 1-based.
    """
    issues: list[tuple[str, str, int]] = []

    try:
        with open(filepath, "r", encoding="utf-8") as f:
            lines = f.readlines()
    except (IOError, PermissionError):
        return [("error", f"Cannot read file: {filepath}", 0)]

    # Per-model tracking state, reset at each `model X {` header.
    current_model = ""
    has_created_at = False
    has_updated_at = False
    model_start_line = 0
    fields_with_relation: list[tuple[str, int]] = []
    indexed_fields: set[str] = set()
    has_id_field = False

    def _flush_model() -> None:
        # Emit the accumulated issues for the model currently being
        # tracked. Reads (never rebinds) the enclosing state, so it is
        # safe to call right before that state is reset.
        if not current_model:
            return
        if not has_created_at:
            issues.append(("warn", f"Model '{current_model}' missing createdAt timestamp", model_start_line))
        if not has_updated_at:
            issues.append(("warn", f"Model '{current_model}' missing updatedAt timestamp", model_start_line))
        if not has_id_field:
            issues.append(("warn", f"Model '{current_model}' has no @id field", model_start_line))
        for field_name, field_line in fields_with_relation:
            if field_name not in indexed_fields:
                issues.append(("warn", f"Model '{current_model}': foreign key '{field_name}' has no @@index", field_line))

    for line_num, line in enumerate(lines, 1):
        stripped = line.strip()

        # A new `model Name {` header closes out the previous model.
        model_match = re.match(r'model\s+(\w+)\s*\{', stripped)
        if model_match:
            _flush_model()

            current_model = model_match.group(1)
            model_start_line = line_num
            has_created_at = False
            has_updated_at = False
            has_id_field = False
            fields_with_relation = []
            indexed_fields = set()

            # Prisma convention: model names are PascalCase.
            if not current_model[0].isupper():
                issues.append(("warn", f"Model '{current_model}' should use PascalCase", line_num))

        if current_model:
            if "createdAt" in stripped or "created_at" in stripped:
                has_created_at = True
            if "updatedAt" in stripped or "updated_at" in stripped:
                has_updated_at = True
            if "@id" in stripped:
                has_id_field = True

            # Heuristic: scalar fields ending in "Id" are treated as
            # foreign-key columns. (The @relation attribute sits on the
            # relation field, not the scalar column, so it cannot be
            # used to find the FK column directly.)
            fk_match = re.match(r'(\w+Id)\s+', stripped)
            if fk_match:
                fields_with_relation.append((fk_match.group(1), line_num))

            # Collect fields covered by @@index([a, b, ...]) directives.
            index_match = re.search(r'@@index\(\[([^\]]+)\]', stripped)
            if index_match:
                for field in index_match.group(1).split(","):
                    indexed_fields.add(field.strip())

    _flush_model()  # close out the final model in the file

    return issues
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
def validate_sql_migration(filepath: str) -> list[tuple[str, str, int]]:
    """Validate a raw SQL migration file for common issues.

    Flags:
      - ``DROP TABLE`` without ``IF EXISTS`` (warn)
      - ``REFERENCES`` without an explicit NULL/NOT NULL constraint (warn)
      - ``CREATE TABLE`` statements, as a reminder to verify timestamp
        columns (info)

    Args:
        filepath: Path to a ``.sql`` migration file.

    Returns:
        A list of ``(severity, message, line_number)`` tuples.
    """
    issues: list[tuple[str, str, int]] = []

    try:
        with open(filepath, "r", encoding="utf-8") as f:
            lines = f.readlines()
    except (IOError, PermissionError):
        return [("error", f"Cannot read file: {filepath}", 0)]

    for line_num, line in enumerate(lines, 1):
        # Uppercase once so keyword checks are case-insensitive.
        stripped = line.strip().upper()

        if "DROP TABLE" in stripped and "IF EXISTS" not in stripped:
            issues.append(("warn", "DROP TABLE without IF EXISTS — may fail on clean databases", line_num))

        # "NULL" being absent already implies "NOT NULL" is absent, so a
        # single substring test covers both nullability keywords.
        if "REFERENCES" in stripped and "NULL" not in stripped:
            issues.append(("warn", "Foreign key without explicit NULL/NOT NULL constraint", line_num))

        if "CREATE TABLE" in stripped:
            issues.append(("info", "Verify this table includes created_at / updated_at columns", line_num))

    return issues
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
def _print_issues(issues: list[tuple[str, str, int]], indent: str = "") -> int:
    """Print a list of (severity, message, line) issues.

    Errors and warnings are counted; "info" lines are printed but not
    counted. Returns the number of counted issues.
    """
    counted = 0
    for severity, message, line in issues:
        if severity == "error":
            fail(f"{indent}L{line}: {message}")
            counted += 1
        elif severity == "warn":
            warn(f"{indent}L{line}: {message}")
            counted += 1
        else:
            print(f" {BLUE}ℹ️ {indent}L{line}: {message}{RESET}")
    return counted


def main() -> None:
    """CLI entry point: detect the schema type and run the matching validator.

    Exits 0 even when issues are found — schema findings are advisory
    and never block a pipeline. Exits 1 only for bad invocation
    (nonexistent project directory).
    """
    parser = argparse.ArgumentParser(
        description="Tribunal schema validator — checks database schemas for common issues"
    )
    parser.add_argument("path", help="Project root directory")
    parser.add_argument("--type", choices=["prisma", "drizzle", "sql", "auto"], default="auto", help="Schema type (default: auto-detect)")
    parser.add_argument("--file", help="Specific schema file to validate")
    args = parser.parse_args()

    project_root = os.path.abspath(args.path)
    if not os.path.isdir(project_root):
        fail(f"Directory not found: {project_root}")
        sys.exit(1)

    print(f"{BOLD}Tribunal — schema_validator.py{RESET}")
    print(f"Project: {project_root}")

    orm_type = args.type if args.type != "auto" else detect_orm(project_root)
    if not orm_type and not args.file:
        skip("No schema files detected — skipping validation")
        sys.exit(0)

    issues_count = 0

    if args.file:
        # Explicit file mode: dispatch on the file extension.
        header(f"Validating: {args.file}")
        filepath = args.file if os.path.isabs(args.file) else os.path.join(project_root, args.file)
        if filepath.endswith(".prisma"):
            issues = validate_prisma(filepath)
        elif filepath.endswith(".sql"):
            issues = validate_sql_migration(filepath)
        else:
            skip(f"Unknown schema file type: {args.file}")
            sys.exit(0)
        issues_count += _print_issues(issues)

    elif orm_type == "prisma":
        schema_path = os.path.join(project_root, "prisma", "schema.prisma")
        if os.path.isfile(schema_path):
            header("Prisma Schema Validation")
            issues_count += _print_issues(validate_prisma(schema_path))
        else:
            skip(f"Prisma schema not found at {schema_path}")

    elif orm_type == "sql":
        header("SQL Migration Validation")
        # Iterate the glob lazily — no need to materialize the dir list.
        for mig_dir in Path(project_root).glob("**/migrations"):
            for sql_file in sorted(mig_dir.glob("*.sql")):
                print(f"\n 📄 {sql_file.name}")
                issues_count += _print_issues(validate_sql_migration(str(sql_file)), indent=" ")

    elif orm_type == "drizzle":
        header("Drizzle Schema")
        skip("Drizzle validation not yet implemented — validate manually")

    # Summary
    print(f"\n{BOLD}━━━ Schema Validation Summary ━━━{RESET}")
    if issues_count == 0:
        ok("No schema issues found")
    else:
        warn(f"{issues_count} issue(s) found — review above")

    sys.exit(0)  # Schema warnings don't block — they're advisory
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
# Allow use both as an importable module and as a CLI script.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,224 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
security_scan.py — Deep security scanner for the Tribunal Agent Kit.
|
|
4
|
+
|
|
5
|
+
Checks for OWASP Top 10 patterns in source code:
|
|
6
|
+
- Hardcoded secrets and credentials
|
|
7
|
+
- SQL injection patterns (string concatenation in queries)
|
|
8
|
+
- XSS-prone code (innerHTML, dangerouslySetInnerHTML)
|
|
9
|
+
- Insecure eval() usage
|
|
10
|
+
- Missing auth patterns
|
|
11
|
+
- Insecure crypto usage
|
|
12
|
+
|
|
13
|
+
Usage:
|
|
14
|
+
python .agent/scripts/security_scan.py .
|
|
15
|
+
python .agent/scripts/security_scan.py . --severity high
|
|
16
|
+
python .agent/scripts/security_scan.py . --files src/auth.ts src/db.ts
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
import os
|
|
20
|
+
import sys
|
|
21
|
+
import re
|
|
22
|
+
import argparse
|
|
23
|
+
from pathlib import Path
|
|
24
|
+
from dataclasses import dataclass
|
|
25
|
+
|
|
26
|
+
# ANSI escape sequences for colorized terminal output.
RED = "\033[91m"
GREEN = "\033[92m"
YELLOW = "\033[93m"
BLUE = "\033[94m"
MAGENTA = "\033[95m"
BOLD = "\033[1m"
RESET = "\033[0m"  # restores default terminal style

# File extensions considered scannable source code.
SOURCE_EXTENSIONS = {".ts", ".tsx", ".js", ".jsx", ".py", ".go", ".java", ".rb"}
# Directories never descended into (dependency, build, and VCS output).
SKIP_DIRS = {"node_modules", ".git", "dist", "build", "__pycache__", ".agent", ".next", "vendor"}
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
@dataclass
class Finding:
    """A single security finding located in a scanned source file."""

    severity: str  # "critical", "high", "medium", "low"
    category: str  # e.g. "Hardcoded Secret", "XSS", "Weak Crypto"
    file: str  # path relative to the scanned project root
    line: int  # 1-based line number of the matching line
    message: str  # human-readable description of the issue
    snippet: str  # the offending line, truncated for display
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
# Terminal color used when rendering each severity level.
SEVERITY_COLORS = {
    "critical": RED + BOLD,
    "high": RED,
    "medium": YELLOW,
    "low": BLUE,
}

# Lower rank = more severe; used both for filtering and sort order.
SEVERITY_RANK = {"critical": 0, "high": 1, "medium": 2, "low": 3}

# Pattern definitions: (regex, severity, category, message)
# NOTE: patterns are applied case-insensitively, one source line at a
# time — multi-line constructs are not matched.
PATTERNS: list[tuple[str, str, str, str]] = [
    # Secrets
    (r'(?:password|passwd|pwd)\s*=\s*["\'][^"\']+["\']', "critical", "Hardcoded Secret", "Hardcoded password detected"),
    (r'(?:api_key|apikey|api_secret)\s*=\s*["\'][^"\']+["\']', "critical", "Hardcoded Secret", "Hardcoded API key detected"),
    (r'(?:secret|token|auth_token)\s*=\s*["\'][A-Za-z0-9+/=]{16,}["\']', "critical", "Hardcoded Secret", "Hardcoded secret/token detected"),
    (r'(?:PRIVATE_KEY|private_key)\s*=\s*["\']', "critical", "Hardcoded Secret", "Hardcoded private key detected"),

    # SQL Injection
    (r'(?:query|execute|raw)\s*\(\s*[`"\'].*\$\{', "high", "SQL Injection", "String interpolation in SQL query — use parameterized queries"),
    (r'(?:query|execute|raw)\s*\(\s*["\'].*\+\s*(?:req|input|params|body)', "high", "SQL Injection", "String concatenation with user input in SQL"),
    (r'\.raw\s*\(\s*`', "medium", "SQL Injection", "Raw query with template literal — verify inputs are sanitized"),

    # XSS
    (r'\.innerHTML\s*=', "high", "XSS", "Direct innerHTML assignment — use textContent or a sanitizer"),
    (r'dangerouslySetInnerHTML', "medium", "XSS", "dangerouslySetInnerHTML used — ensure input is sanitized"),
    (r'document\.write\s*\(', "high", "XSS", "document.write() is an XSS vector"),

    # Insecure Functions
    (r'\beval\s*\(', "high", "Code Injection", "eval() is a code injection vector — avoid entirely"),
    (r'new\s+Function\s*\(', "high", "Code Injection", "new Function() is equivalent to eval()"),
    (r'child_process\.exec\s*\(', "medium", "Command Injection", "exec() with unsanitized input is a command injection vector"),
    (r'subprocess\.call\s*\(\s*[^,\]]*\bshell\s*=\s*True', "high", "Command Injection", "subprocess with shell=True — use shell=False and pass args as list"),

    # Crypto
    (r'createHash\s*\(\s*["\']md5["\']', "medium", "Weak Crypto", "MD5 is cryptographically broken — use SHA-256+"),
    (r'createHash\s*\(\s*["\']sha1["\']', "medium", "Weak Crypto", "SHA-1 is deprecated — use SHA-256+"),
    (r'Math\.random\s*\(', "low", "Weak Randomness", "Math.random() is not cryptographically secure — use crypto.randomBytes()"),

    # Auth Issues
    (r'algorithms\s*:\s*\[\s*["\']none["\']', "critical", "Auth Bypass", "JWT 'none' algorithm allows auth bypass"),
    (r'verify\s*:\s*false', "high", "Auth Bypass", "SSL/TLS verification disabled"),
    (r'rejectUnauthorized\s*:\s*false', "high", "Auth Bypass", "TLS certificate validation disabled"),

    # Information Disclosure
    (r'console\.log\s*\(.*(?:password|secret|token|key)', "medium", "Info Disclosure", "Sensitive data logged to console"),
    (r'\.env(?:\.local|\.production)', "low", "Info Disclosure", "Env file reference — ensure not committed to git"),
]
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
# Compiled once at import time: scan_file runs per file, per line, and
# must not recompile every regex on every line it inspects.
_COMPILED_PATTERNS: list[tuple["re.Pattern", str, str, str]] = [
    (re.compile(pattern, re.IGNORECASE), severity, category, message)
    for pattern, severity, category, message in PATTERNS
]


def scan_file(filepath: str, project_root: str) -> list[Finding]:
    """Scan a single source file against all known security patterns.

    Args:
        filepath: Path of the file to scan.
        project_root: Project root, used to compute the relative path
            reported in each finding.

    Returns:
        A list of Finding records; empty when the file is clean or
        cannot be read.
    """
    findings: list[Finding] = []
    rel_path = os.path.relpath(filepath, project_root)

    try:
        with open(filepath, "r", encoding="utf-8", errors="ignore") as f:
            lines = f.readlines()
    except (IOError, PermissionError):
        return findings

    for line_num, line in enumerate(lines, 1):
        stripped = line.strip()
        # Skip obvious comment lines: //, #, and the '*' continuation of
        # block comments. NOTE(review): this also skips source lines that
        # merely start with '*' (e.g. Python unpacking) — acceptable for
        # a heuristic scanner.
        if stripped.startswith(("//", "#", "*")):
            continue

        for regex, severity, category, message in _COMPILED_PATTERNS:
            if regex.search(stripped):
                findings.append(Finding(
                    severity=severity,
                    category=category,
                    file=rel_path,
                    line=line_num,
                    message=message,
                    snippet=stripped[:120],  # cap snippet length for display
                ))

    return findings
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def scan_directory(project_root: str, target_files: list[str] | None = None) -> list[Finding]:
    """Scan source files under *project_root* for security findings.

    When *target_files* is given, only those files are scanned (paths may
    be absolute or relative to the root). Otherwise the whole tree is
    walked, pruning SKIP_DIRS and ignoring extensions outside
    SOURCE_EXTENSIONS.
    """
    results: list[Finding] = []

    if target_files:
        for candidate in target_files:
            full = candidate if os.path.isabs(candidate) else os.path.join(project_root, candidate)
            if os.path.isfile(full):
                results.extend(scan_file(full, project_root))
        return results

    for dirpath, subdirs, filenames in os.walk(project_root):
        # Prune skipped directories in place so os.walk never descends.
        subdirs[:] = [d for d in subdirs if d not in SKIP_DIRS]
        for name in filenames:
            if Path(name).suffix in SOURCE_EXTENSIONS:
                results.extend(scan_file(os.path.join(dirpath, name), project_root))

    return results
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def print_findings(findings: list[Finding], min_severity: str) -> int:
    """Print findings at or above *min_severity*, grouped by category.

    Findings are ordered most-severe first; a category heading is
    printed whenever the category changes. Returns the number of
    findings displayed.
    """
    threshold = SEVERITY_RANK.get(min_severity, 3)
    shown = sorted(
        (f for f in findings if SEVERITY_RANK.get(f.severity, 3) <= threshold),
        key=lambda f: SEVERITY_RANK.get(f.severity, 3),
    )

    if not shown:
        print(f"\n {GREEN}✅ No security issues found at severity '{min_severity}' or above{RESET}")
        return 0

    last_category = ""
    for finding in shown:
        if finding.category != last_category:
            last_category = finding.category
            print(f"\n {BOLD}{last_category}{RESET}")

        color = SEVERITY_COLORS.get(finding.severity, "")
        print(f" {color}[{finding.severity.upper()}]{RESET} {finding.file}:{finding.line}")
        print(f" {finding.message}")
        print(f" {MAGENTA}→ {finding.snippet}{RESET}")

    return len(shown)
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
def main() -> None:
    """CLI entry point for the security scanner.

    Scans the given project (or specific files), prints findings above
    the requested severity, then a per-severity summary. Exits 1 when
    any critical finding exists, otherwise 0.
    """
    parser = argparse.ArgumentParser(
        description="Tribunal security scanner — OWASP-aware source code analysis"
    )
    parser.add_argument("path", help="Project root directory to scan")
    parser.add_argument(
        "--severity",
        choices=["critical", "high", "medium", "low"],
        default="low",
        help="Minimum severity to report (default: low — show everything)",
    )
    parser.add_argument("--files", nargs="*", help="Specific files to scan")
    args = parser.parse_args()

    project_root = os.path.abspath(args.path)
    if not os.path.isdir(project_root):
        print(f" {RED}❌ Directory not found: {project_root}{RESET}")
        sys.exit(1)

    print(f"{BOLD}Tribunal — security_scan.py{RESET}")
    print(f"Project: {project_root}")
    print(f"Severity filter: {args.severity}+")

    findings = scan_directory(project_root, args.files)
    shown = print_findings(findings, args.severity)

    # Summary
    print(f"\n{BOLD}━━━ Security Scan Summary ━━━{RESET}")
    by_severity: dict[str, int] = {}
    for finding in findings:
        by_severity[finding.severity] = by_severity.get(finding.severity, 0) + 1

    for level in ("critical", "high", "medium", "low"):
        total = by_severity.get(level, 0)
        if total > 0:
            color = SEVERITY_COLORS.get(level, "")
            print(f" {color}{level.upper()}: {total}{RESET}")

    if shown == 0:
        print(f" {GREEN}✅ No issues found — scan passed{RESET}")
    else:
        urgent = by_severity.get("critical", 0) + by_severity.get("high", 0)
        if urgent > 0:
            print(f"\n {RED}{BOLD}⚠️ {urgent} critical/high issue(s) require immediate attention{RESET}")

    # Only critical findings fail the scan; lower severities are advisory.
    sys.exit(1 if by_severity.get("critical", 0) > 0 else 0)
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
# Allow use both as an importable module and as a CLI script.
if __name__ == "__main__":
    main()
|