plugin_scanner-1.4.15-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. codex_plugin_scanner/__init__.py +29 -0
  2. codex_plugin_scanner/action_runner.py +470 -0
  3. codex_plugin_scanner/checks/__init__.py +0 -0
  4. codex_plugin_scanner/checks/best_practices.py +238 -0
  5. codex_plugin_scanner/checks/claude.py +285 -0
  6. codex_plugin_scanner/checks/code_quality.py +115 -0
  7. codex_plugin_scanner/checks/ecosystem_common.py +34 -0
  8. codex_plugin_scanner/checks/gemini.py +196 -0
  9. codex_plugin_scanner/checks/manifest.py +501 -0
  10. codex_plugin_scanner/checks/manifest_support.py +61 -0
  11. codex_plugin_scanner/checks/marketplace.py +334 -0
  12. codex_plugin_scanner/checks/opencode.py +223 -0
  13. codex_plugin_scanner/checks/operational_security.py +346 -0
  14. codex_plugin_scanner/checks/security.py +447 -0
  15. codex_plugin_scanner/checks/skill_security.py +241 -0
  16. codex_plugin_scanner/cli.py +467 -0
  17. codex_plugin_scanner/config.py +76 -0
  18. codex_plugin_scanner/ecosystems/__init__.py +15 -0
  19. codex_plugin_scanner/ecosystems/base.py +20 -0
  20. codex_plugin_scanner/ecosystems/claude.py +112 -0
  21. codex_plugin_scanner/ecosystems/codex.py +94 -0
  22. codex_plugin_scanner/ecosystems/detect.py +46 -0
  23. codex_plugin_scanner/ecosystems/gemini.py +80 -0
  24. codex_plugin_scanner/ecosystems/opencode.py +184 -0
  25. codex_plugin_scanner/ecosystems/registry.py +41 -0
  26. codex_plugin_scanner/ecosystems/types.py +45 -0
  27. codex_plugin_scanner/integrations/__init__.py +5 -0
  28. codex_plugin_scanner/integrations/cisco_skill_scanner.py +200 -0
  29. codex_plugin_scanner/lint_fixes.py +105 -0
  30. codex_plugin_scanner/marketplace_support.py +100 -0
  31. codex_plugin_scanner/models.py +177 -0
  32. codex_plugin_scanner/path_support.py +46 -0
  33. codex_plugin_scanner/policy.py +140 -0
  34. codex_plugin_scanner/quality_artifact.py +91 -0
  35. codex_plugin_scanner/repo_detect.py +137 -0
  36. codex_plugin_scanner/reporting.py +376 -0
  37. codex_plugin_scanner/rules/__init__.py +6 -0
  38. codex_plugin_scanner/rules/registry.py +101 -0
  39. codex_plugin_scanner/rules/specs.py +26 -0
  40. codex_plugin_scanner/scanner.py +557 -0
  41. codex_plugin_scanner/submission.py +284 -0
  42. codex_plugin_scanner/suppressions.py +87 -0
  43. codex_plugin_scanner/trust_domain_scoring.py +22 -0
  44. codex_plugin_scanner/trust_helpers.py +207 -0
  45. codex_plugin_scanner/trust_mcp_scoring.py +116 -0
  46. codex_plugin_scanner/trust_models.py +85 -0
  47. codex_plugin_scanner/trust_plugin_scoring.py +180 -0
  48. codex_plugin_scanner/trust_scoring.py +52 -0
  49. codex_plugin_scanner/trust_skill_scoring.py +296 -0
  50. codex_plugin_scanner/trust_specs.py +286 -0
  51. codex_plugin_scanner/verification.py +964 -0
  52. codex_plugin_scanner/version.py +3 -0
  53. plugin_scanner-1.4.15.dist-info/METADATA +596 -0
  54. plugin_scanner-1.4.15.dist-info/RECORD +57 -0
  55. plugin_scanner-1.4.15.dist-info/WHEEL +4 -0
  56. plugin_scanner-1.4.15.dist-info/entry_points.txt +4 -0
  57. plugin_scanner-1.4.15.dist-info/licenses/LICENSE +120 -0
@@ -0,0 +1,376 @@
1
+ """Structured report formatters for scan results."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+
7
+ from .models import GRADE_LABELS, SEVERITY_ORDER, Finding, ScanResult, Severity, severity_from_value
8
+ from .version import __version__
9
+
10
+
11
def _sorted_findings(findings: tuple[Finding, ...]) -> list[Finding]:
    """Order findings from most to least severe, preserving input order for ties."""

    def rank(finding: Finding) -> int:
        # Negate so the highest severity sorts first under a plain ascending sort;
        # stable sort keeps equal-severity findings in their original order.
        return -SEVERITY_ORDER[finding.severity]

    return sorted(findings, key=rank)
13
+
14
+
15
def _serialize_trust(result: ScanResult) -> dict[str, object]:
    """Serialize a result's trust report into a JSON-friendly mapping.

    When no trust report was computed, an empty shell with the same keys is
    returned so consumers can rely on the structure always being present.
    """
    report = result.trust_report
    if report is None:
        return {
            "total": 0.0,
            "execution": {"includeExternal": False, "computedAt": result.timestamp},
            "domains": [],
        }

    def component_payload(component) -> dict[str, object]:
        # Leaf-level score contribution plus its supporting evidence strings.
        return {
            "key": component.key,
            "score": component.score,
            "rationale": component.rationale,
            "evidence": list(component.evidence),
        }

    def adapter_payload(adapter) -> dict[str, object]:
        # One scoring adapter, including whether it counted toward the denominator.
        return {
            "id": adapter.adapter_id,
            "label": adapter.label,
            "weight": adapter.weight,
            "contributionMode": adapter.contribution_mode,
            "applicable": adapter.applicable,
            "emitted": adapter.emitted,
            "includedInDenominator": adapter.included_in_denominator,
            "score": adapter.score,
            "components": [component_payload(component) for component in adapter.components],
        }

    def domain_payload(domain) -> dict[str, object]:
        # One trust domain with its spec/profile provenance and adapter scores.
        return {
            "domain": domain.domain,
            "label": domain.label,
            "score": domain.score,
            "spec": {
                "id": domain.spec_id,
                "version": domain.spec_version,
                "path": domain.spec_path,
                "derivedFrom": list(domain.derived_from),
            },
            "profile": {
                "id": domain.profile_id,
                "version": domain.profile_version,
            },
            "adapters": [adapter_payload(adapter) for adapter in domain.adapters],
        }

    return {
        "total": report.total,
        "execution": {
            "includeExternal": report.include_external,
            "computedAt": report.computed_at,
        },
        "domains": [domain_payload(domain) for domain in report.domains],
    }
70
+
71
+
72
def build_json_payload(
    result: ScanResult,
    *,
    profile: str = "default",
    policy_pass: bool = True,
    verify_pass: bool = True,
    raw_score: int | None = None,
    effective_score: int | None = None,
) -> dict[str, object]:
    """Convert a scan result into a JSON-serializable payload.

    Args:
        result: The scan result to serialize.
        profile: Name of the scan profile reported in the payload.
        policy_pass: Outcome of the policy gate, echoed into the payload.
        verify_pass: Outcome of the verification gate, echoed into the payload.
        raw_score: Pre-adjustment score; defaults to ``result.score`` when None.
        effective_score: Post-adjustment score; defaults to ``result.score`` when None.

    Returns:
        A dict of JSON-serializable values following the ``scan-result.v1`` schema.
        Repository-scoped results additionally carry ``repository``, ``plugins``
        and ``skippedTargets`` keys.
    """

    payload = {
        "schema_version": "scan-result.v1",
        "tool_version": __version__,
        "profile": profile,
        "policy_pass": policy_pass,
        "verify_pass": verify_pass,
        "scope": result.scope,
        "score": result.score,
        # raw/effective scores fall back to the result's own score when the
        # caller does not supply an override.
        "raw_score": result.score if raw_score is None else raw_score,
        "effective_score": result.score if effective_score is None else effective_score,
        "grade": result.grade,
        "ecosystems": list(result.ecosystems),
        "packages": [
            {
                "ecosystem": package.ecosystem,
                "packageKind": package.package_kind,
                "rootPath": package.root_path,
                "manifestPath": package.manifest_path,
                "name": package.name,
                "version": package.version,
            }
            for package in result.packages
        ],
        "summary": {
            "gradeLabel": GRADE_LABELS.get(result.grade, "Unknown"),
            "findings": result.severity_counts,
            "integrations": [
                {
                    "name": integration.name,
                    "status": integration.status,
                    "message": integration.message,
                    "findingsCount": integration.findings_count,
                    "metadata": integration.metadata,
                }
                for integration in result.integrations
            ],
        },
        "trust": _serialize_trust(result),
        # Per-category breakdown: category score/max are derived from the
        # individual checks rather than stored on the category itself.
        "categories": [
            {
                "name": category.name,
                "score": sum(check.points for check in category.checks),
                "max": sum(check.max_points for check in category.checks),
                "checks": [
                    {
                        "name": check.name,
                        "passed": check.passed,
                        "points": check.points,
                        "maxPoints": check.max_points,
                        "message": check.message,
                        "findings": [
                            {
                                "ruleId": finding.rule_id,
                                "severity": finding.severity.value,
                                "title": finding.title,
                                "description": finding.description,
                                "remediation": finding.remediation,
                                "filePath": finding.file_path,
                                "lineNumber": finding.line_number,
                                "source": finding.source,
                            }
                            for finding in check.findings
                        ],
                    }
                    for check in category.checks
                ],
            }
            for category in result.categories
        ],
        # Flat list of all findings, most severe first.
        "findings": [
            {
                "ruleId": finding.rule_id,
                "severity": finding.severity.value,
                "category": finding.category,
                "title": finding.title,
                "description": finding.description,
                "remediation": finding.remediation,
                "filePath": finding.file_path,
                "lineNumber": finding.line_number,
                "source": finding.source,
            }
            for finding in _sorted_findings(result.findings)
        ],
        "timestamp": result.timestamp,
        "pluginDir": result.plugin_dir,
    }
    if result.scope == "repository":
        payload["repository"] = {
            "marketplaceFile": result.marketplace_file,
            "localPluginCount": len(result.plugin_results),
        }
        payload["plugins"] = [
            {
                # Fall back to the last path segment when no explicit name is set.
                # NOTE(review): assumes "/"-separated paths — confirm plugin_dir
                # is always normalized to POSIX separators upstream.
                "name": plugin.plugin_name or plugin.plugin_dir.rsplit("/", 1)[-1],
                "pluginDir": plugin.plugin_dir,
                "score": plugin.score,
                "grade": plugin.grade,
                "trust": _serialize_trust(plugin),
                "summary": {
                    "findings": plugin.severity_counts,
                    "integrations": [
                        {
                            "name": integration.name,
                            "status": integration.status,
                            "message": integration.message,
                            "findingsCount": integration.findings_count,
                            "metadata": integration.metadata,
                        }
                        for integration in plugin.integrations
                    ],
                },
            }
            for plugin in result.plugin_results
        ]
        payload["skippedTargets"] = [
            {
                "name": skipped.name,
                "reason": skipped.reason,
                "sourcePath": skipped.source_path,
            }
            for skipped in result.skipped_targets
        ]
    return payload
206
+
207
+
208
def format_json(
    result: ScanResult,
    *,
    profile: str = "default",
    policy_pass: bool = True,
    verify_pass: bool = True,
    raw_score: int | None = None,
    effective_score: int | None = None,
) -> str:
    """Render a scan result as indented JSON.

    Thin wrapper around :func:`build_json_payload`; all keyword arguments are
    forwarded unchanged.
    """
    payload = build_json_payload(
        result,
        profile=profile,
        policy_pass=policy_pass,
        verify_pass=verify_pass,
        raw_score=raw_score,
        effective_score=effective_score,
    )
    return json.dumps(payload, indent=2)
230
+
231
+
232
def format_markdown(result: ScanResult) -> str:
    """Render a scan result as a markdown report."""
    target_label = "Repository" if result.scope == "repository" else "Plugin"
    trust_score = result.trust_report.total if result.trust_report else 0.0
    ecosystem_list = ", ".join(result.ecosystems) if result.ecosystems else "unknown"

    lines: list[str] = [
        "# Codex Plugin Scanner Report",
        "",
        f"- {target_label}: `{result.plugin_dir}`",
        f"- Score: **{result.score}/100**",
        f"- Grade: **{result.grade} - {GRADE_LABELS.get(result.grade, 'Unknown')}**",
        f"- Trust: **{trust_score}/100**",
        f"- Ecosystems: **{ecosystem_list}**",
        "",
        "## Findings Summary",
        "",
    ]
    # One bullet per severity level, defaulting to zero when absent.
    lines.extend(
        f"- {severity.value.title()}: {result.severity_counts.get(severity.value, 0)}"
        for severity in Severity
    )

    if result.scope == "repository":
        lines.extend(["", "## Local Plugins", ""])
        for plugin in result.plugin_results:
            plugin_trust = plugin.trust_report.total if plugin.trust_report else 0.0
            lines.append(
                f"- **{plugin.plugin_name or plugin.plugin_dir}**: "
                f"{plugin.score}/100 ({plugin.grade}), trust {plugin_trust}/100"
            )
        if result.skipped_targets:
            lines.extend(["", "## Skipped Marketplace Entries", ""])
            for skipped in result.skipped_targets:
                suffix = f" (`{skipped.source_path}`)" if skipped.source_path else ""
                lines.append(f"- **{skipped.name}**{suffix}: {skipped.reason}")

    lines.extend(["", "## Categories", ""])
    for category in result.categories:
        earned = sum(check.points for check in category.checks)
        possible = sum(check.max_points for check in category.checks)
        lines.append(f"- **{category.name}**: {earned}/{possible}")

    # Cap the markdown view at the ten most severe findings.
    top_findings = _sorted_findings(result.findings)[:10]
    lines.extend(["", "## Top Findings", ""])
    if not top_findings:
        lines.append("- No findings detected.")
    for finding in top_findings:
        location = f" (`{finding.file_path}`)" if finding.file_path else ""
        lines.append(f"- **{finding.severity.value.upper()}** {finding.title}{location}")
        lines.append(f"  - {finding.description}")
        if finding.remediation:
            lines.append(f"  - Remediation: {finding.remediation}")

    if result.trust_report and result.trust_report.domains:
        lines.extend(["", "## Trust Provenance", ""])
        for domain in result.trust_report.domains:
            lines.append(f"- **{domain.label}** ({domain.spec_id}): {domain.score}/100")
            lines.extend(
                f"  - {adapter.label}: {adapter.score}/100 (weight {adapter.weight})"
                for adapter in domain.adapters
            )

    lines.extend(["", "## Integration Status", ""])
    for integration in result.integrations:
        lines.append(f"- **{integration.name}**: `{integration.status}` - {integration.message}")

    return "\n".join(lines)
294
+
295
+
296
def format_sarif(result: ScanResult) -> str:
    """Render a scan result as SARIF 2.1.0 JSON."""
    ordered = _sorted_findings(result.findings)

    # Build one rule descriptor per unique rule ID; the first (most severe)
    # occurrence supplies the metadata.
    rule_descriptors: list[dict[str, object]] = []
    registered: set[str] = set()
    for finding in ordered:
        if finding.rule_id in registered:
            continue
        registered.add(finding.rule_id)
        rule_descriptors.append(
            {
                "id": finding.rule_id,
                "name": finding.title,
                "shortDescription": {"text": finding.title},
                "fullDescription": {"text": finding.description},
                "help": {"text": finding.remediation or "Review and remediate this finding."},
                "properties": {
                    "tags": [finding.category, finding.source],
                    "precision": "high",
                    "problem.severity": finding.severity.value,
                },
            }
        )

    def sarif_level(severity: Severity) -> str:
        # Map scanner severities onto SARIF's error/warning/note levels.
        rank = SEVERITY_ORDER[severity]
        if rank >= SEVERITY_ORDER[Severity.HIGH]:
            return "error"
        if rank >= SEVERITY_ORDER[Severity.MEDIUM]:
            return "warning"
        return "note"

    sarif_results: list[dict[str, object]] = []
    for finding in ordered:
        entry: dict[str, object] = {
            "ruleId": finding.rule_id,
            "level": sarif_level(finding.severity),
            "message": {"text": finding.description},
            "properties": {
                "severity": finding.severity.value,
                "category": finding.category,
                "source": finding.source,
            },
        }
        if finding.file_path:
            physical: dict[str, object] = {
                "artifactLocation": {"uri": finding.file_path},
            }
            if finding.line_number:
                physical["region"] = {"startLine": finding.line_number}
            entry["locations"] = [{"physicalLocation": physical}]
        sarif_results.append(entry)

    document = {
        "version": "2.1.0",
        "$schema": "https://json.schemastore.org/sarif-2.1.0.json",
        "runs": [
            {
                "tool": {
                    "driver": {
                        "name": "codex-plugin-scanner",
                        "informationUri": "https://github.com/hashgraph-online/codex-plugin-scanner",
                        "version": __version__,
                        "rules": rule_descriptors,
                    }
                },
                "results": sarif_results,
            }
        ],
    }
    return json.dumps(document, indent=2)
368
+
369
+
370
def should_fail_for_severity(result: ScanResult, threshold: str | None) -> bool:
    """Return True when the result contains a finding at or above the threshold.

    A missing/empty threshold, or the literal string "none" (any casing),
    disables severity-based failure entirely.
    """
    if not threshold or threshold.lower() == "none":
        return False
    minimum_rank = SEVERITY_ORDER[severity_from_value(threshold)]
    for finding in result.findings:
        if SEVERITY_ORDER[finding.severity] >= minimum_rank:
            return True
    return False
@@ -0,0 +1,6 @@
1
"""Rule registry exports."""

from codex_plugin_scanner.rules.registry import CATEGORY_WEIGHTS, get_rule_spec, has_rule_spec, list_rule_specs
from codex_plugin_scanner.rules.specs import RuleSpec

# Public API of the ``rules`` package: registry accessors plus the spec model.
__all__ = ["CATEGORY_WEIGHTS", "RuleSpec", "get_rule_spec", "has_rule_spec", "list_rule_specs"]
@@ -0,0 +1,101 @@
1
+ """Built-in rule registry for stable rule IDs and metadata."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from codex_plugin_scanner.models import Severity
6
+ from codex_plugin_scanner.rules.specs import RuleSpec
7
+
8
# Per-category weights used by scoring.
# NOTE(review): these sum to 110, not 100 — confirm whether scoring normalizes
# or whether the weights are intentionally over 100.
CATEGORY_WEIGHTS: dict[str, int] = {
    "manifest": 31,
    "security": 16,
    "operational-security": 18,
    "best-practices": 15,
    "marketplace": 11,
    "skill-security": 9,
    "code-quality": 10,
}

# Base URL for per-rule documentation; each rule links to
# ``{_DOC_ROOT}/{docs_slug}.md`` (see ``_rule``).
_DOC_ROOT = "https://github.com/hashgraph-online/codex-plugin-scanner/blob/main/docs/rules"
19
+
20
+
21
def _rule(
    rule_id: str,
    category: str,
    severity: Severity,
    weight: int,
    docs_slug: str,
    *,
    fixable: bool = False,
) -> RuleSpec:
    """Build a RuleSpec whose description/remediation text is derived from the ID."""
    # Humanize the rule ID, e.g. "HARDCODED_SECRET" -> "Hardcoded Secret".
    humanized = rule_id.replace("_", " ").replace("-", " ").title()
    docs_url = f"{_DOC_ROOT}/{docs_slug}.md"
    return RuleSpec(
        rule_id=rule_id,
        category=category,
        default_severity=severity,
        weight=weight,
        docs_slug=docs_slug,
        description=f"{humanized} was detected.",
        remediation=f"Review and remediate {humanized.lower()}.",
        docs_url=docs_url,
        fixable=fixable,
    )
42
+
43
+
44
# All built-in rules, grouped by category. Rule IDs are stable external
# identifiers (referenced by suppressions/policies) and must not be renamed.
RULE_SPECS: tuple[RuleSpec, ...] = (
    # manifest
    _rule("PLUGIN_JSON_MISSING", "manifest", Severity.HIGH, 5, "plugin-json-missing"),
    _rule("PLUGIN_JSON_INVALID", "manifest", Severity.HIGH, 5, "plugin-json-invalid"),
    # best-practices
    _rule("README_MISSING", "best-practices", Severity.LOW, 3, "readme-missing", fixable=True),
    _rule("SKILLS_DIR_MISSING", "best-practices", Severity.MEDIUM, 4, "skills-dir-missing"),
    _rule("SKILL_FRONTMATTER_INVALID", "best-practices", Severity.MEDIUM, 4, "skill-frontmatter-invalid"),
    _rule("ENV_FILE_COMMITTED", "best-practices", Severity.HIGH, 5, "env-file-committed"),
    _rule("CODEXIGNORE_MISSING", "best-practices", Severity.LOW, 3, "codexignore-missing", fixable=True),
    # security
    _rule("SECURITY_MD_MISSING", "security", Severity.MEDIUM, 3, "security-md-missing", fixable=True),
    _rule("LICENSE_MISSING", "security", Severity.MEDIUM, 3, "license-missing", fixable=True),
    _rule("HARDCODED_SECRET", "security", Severity.CRITICAL, 7, "hardcoded-secret"),
    _rule("DANGEROUS_MCP_COMMAND", "security", Severity.HIGH, 4, "dangerous-mcp-command"),
    _rule("MCP_CONFIG_INVALID_JSON", "security", Severity.HIGH, 4, "mcp-config-invalid-json"),
    _rule("MCP_REMOTE_URL_INSECURE", "security", Severity.HIGH, 4, "mcp-remote-url-insecure"),
    _rule("RISKY_APPROVAL_DEFAULT", "security", Severity.MEDIUM, 2, "risky-approval-default"),
    # marketplace
    _rule("MARKETPLACE_JSON_INVALID", "marketplace", Severity.HIGH, 5, "marketplace-json-invalid"),
    _rule("MARKETPLACE_NAME_MISSING", "marketplace", Severity.MEDIUM, 5, "marketplace-name-missing"),
    _rule("MARKETPLACE_PLUGINS_MISSING", "marketplace", Severity.HIGH, 5, "marketplace-plugins-missing"),
    _rule("MARKETPLACE_SOURCE_MISSING", "marketplace", Severity.MEDIUM, 5, "marketplace-source-missing"),
    _rule("MARKETPLACE_POLICY_MISSING", "marketplace", Severity.MEDIUM, 5, "marketplace-policy-missing"),
    _rule("MARKETPLACE_POLICY_FIELDS_MISSING", "marketplace", Severity.MEDIUM, 4, "marketplace-policy-fields-missing"),
    _rule("MARKETPLACE_UNSAFE_SOURCE", "marketplace", Severity.HIGH, 3, "marketplace-unsafe-source"),
    # code-quality
    _rule("DANGEROUS_DYNAMIC_EXECUTION", "code-quality", Severity.HIGH, 5, "dangerous-dynamic-execution"),
    _rule("SHELL_INJECTION_PATTERN", "code-quality", Severity.HIGH, 5, "shell-injection-pattern"),
    # operational-security
    _rule("GITHUB_ACTION_UNPINNED", "operational-security", Severity.HIGH, 5, "github-action-unpinned"),
    _rule("GITHUB_ACTIONS_WRITE_ALL", "operational-security", Severity.HIGH, 5, "github-actions-write-all"),
    _rule(
        "GITHUB_ACTIONS_UNTRUSTED_CHECKOUT",
        "operational-security",
        Severity.HIGH,
        4,
        "github-actions-untrusted-checkout",
    ),
    _rule("DEPENDABOT_MISSING", "operational-security", Severity.LOW, 2, "dependabot-missing"),
    _rule(
        "DEPENDABOT_GITHUB_ACTIONS_MISSING",
        "operational-security",
        Severity.LOW,
        2,
        "dependabot-github-actions-missing",
    ),
    _rule("DEPENDENCY_LOCKFILE_MISSING", "operational-security", Severity.MEDIUM, 2, "dependency-lockfile-missing"),
    # skill-security
    # NOTE(review): this ID uses hyphens while every other rule uses
    # underscores — likely an oversight, but it cannot be renamed without
    # breaking existing suppressions; confirm before normalizing.
    _rule("CISCO-SCANNER-UNAVAILABLE", "skill-security", Severity.LOW, 3, "cisco-scanner-unavailable"),
)

# Fast lookup table keyed by rule ID, derived once at import time.
_RULES_BY_ID: dict[str, RuleSpec] = {rule.rule_id: rule for rule in RULE_SPECS}
90
+
91
+
92
def list_rule_specs() -> tuple[RuleSpec, ...]:
    """Return every built-in rule spec in registration order."""
    return RULE_SPECS
94
+
95
+
96
def get_rule_spec(rule_id: str) -> RuleSpec | None:
    """Look up the spec for *rule_id*, or None when the ID is unknown."""
    try:
        return _RULES_BY_ID[rule_id]
    except KeyError:
        return None
98
+
99
+
100
def has_rule_spec(rule_id: str) -> bool:
    """Tell whether *rule_id* names a known built-in rule."""
    # Registry values are always RuleSpec instances, so a None lookup result
    # unambiguously means "not registered".
    return _RULES_BY_ID.get(rule_id) is not None
@@ -0,0 +1,26 @@
1
+ """Rule specification models used by lint and scan policy layers."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass
6
+
7
+ from codex_plugin_scanner.models import Severity
8
+
9
+
10
+ @dataclass(frozen=True, slots=True)
11
+ class RuleSpec:
12
+ """Static metadata for a scanner rule."""
13
+
14
+ rule_id: str
15
+ category: str
16
+ default_severity: Severity
17
+ weight: int
18
+ docs_slug: str
19
+ description: str
20
+ remediation: str
21
+ docs_url: str
22
+ fixable: bool = False
23
+ profiles: tuple[str, ...] = ("default", "public-marketplace", "strict-security")
24
+
25
+
26
+ ALL_PROFILES: tuple[str, ...] = ("default", "public-marketplace", "strict-security")