@cleocode/skills 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dispatch-config.json +404 -0
- package/index.d.ts +178 -0
- package/index.js +405 -0
- package/package.json +14 -0
- package/profiles/core.json +7 -0
- package/profiles/full.json +10 -0
- package/profiles/minimal.json +7 -0
- package/profiles/recommended.json +7 -0
- package/provider-skills-map.json +97 -0
- package/skills/_shared/cleo-style-guide.md +84 -0
- package/skills/_shared/manifest-operations.md +810 -0
- package/skills/_shared/placeholders.json +433 -0
- package/skills/_shared/skill-chaining-patterns.md +237 -0
- package/skills/_shared/subagent-protocol-base.md +223 -0
- package/skills/_shared/task-system-integration.md +232 -0
- package/skills/_shared/testing-framework-config.md +110 -0
- package/skills/ct-cleo/SKILL.md +490 -0
- package/skills/ct-cleo/references/anti-patterns.md +19 -0
- package/skills/ct-cleo/references/loom-lifecycle.md +136 -0
- package/skills/ct-cleo/references/orchestrator-constraints.md +55 -0
- package/skills/ct-cleo/references/session-protocol.md +162 -0
- package/skills/ct-codebase-mapper/SKILL.md +82 -0
- package/skills/ct-contribution/SKILL.md +521 -0
- package/skills/ct-contribution/templates/contribution-init.json +21 -0
- package/skills/ct-dev-workflow/SKILL.md +423 -0
- package/skills/ct-docs-lookup/SKILL.md +66 -0
- package/skills/ct-docs-review/SKILL.md +175 -0
- package/skills/ct-docs-write/SKILL.md +108 -0
- package/skills/ct-documentor/SKILL.md +231 -0
- package/skills/ct-epic-architect/SKILL.md +305 -0
- package/skills/ct-epic-architect/references/bug-epic-example.md +172 -0
- package/skills/ct-epic-architect/references/commands.md +201 -0
- package/skills/ct-epic-architect/references/feature-epic-example.md +210 -0
- package/skills/ct-epic-architect/references/migration-epic-example.md +244 -0
- package/skills/ct-epic-architect/references/output-format.md +92 -0
- package/skills/ct-epic-architect/references/patterns.md +284 -0
- package/skills/ct-epic-architect/references/refactor-epic-example.md +412 -0
- package/skills/ct-epic-architect/references/research-epic-example.md +226 -0
- package/skills/ct-epic-architect/references/shell-escaping.md +86 -0
- package/skills/ct-epic-architect/references/skill-aware-execution.md +195 -0
- package/skills/ct-grade/SKILL.md +230 -0
- package/skills/ct-grade/agents/analysis-reporter.md +203 -0
- package/skills/ct-grade/agents/blind-comparator.md +157 -0
- package/skills/ct-grade/agents/scenario-runner.md +134 -0
- package/skills/ct-grade/eval-viewer/__pycache__/generate_grade_review.cpython-314.pyc +0 -0
- package/skills/ct-grade/eval-viewer/generate_grade_review.py +1138 -0
- package/skills/ct-grade/eval-viewer/generate_grade_viewer.py +544 -0
- package/skills/ct-grade/eval-viewer/generate_review.py +283 -0
- package/skills/ct-grade/eval-viewer/grade-review.html +1574 -0
- package/skills/ct-grade/eval-viewer/viewer.html +219 -0
- package/skills/ct-grade/evals/evals.json +94 -0
- package/skills/ct-grade/references/ab-test-methodology.md +150 -0
- package/skills/ct-grade/references/domains.md +137 -0
- package/skills/ct-grade/references/grade-spec.md +236 -0
- package/skills/ct-grade/references/scenario-playbook.md +234 -0
- package/skills/ct-grade/references/token-tracking.md +120 -0
- package/skills/ct-grade/scripts/__pycache__/audit_analyzer.cpython-314.pyc +0 -0
- package/skills/ct-grade/scripts/__pycache__/run_ab_test.cpython-314.pyc +0 -0
- package/skills/ct-grade/scripts/__pycache__/run_all.cpython-314.pyc +0 -0
- package/skills/ct-grade/scripts/__pycache__/token_tracker.cpython-314.pyc +0 -0
- package/skills/ct-grade/scripts/audit_analyzer.py +279 -0
- package/skills/ct-grade/scripts/generate_report.py +283 -0
- package/skills/ct-grade/scripts/run_ab_test.py +504 -0
- package/skills/ct-grade/scripts/run_all.py +287 -0
- package/skills/ct-grade/scripts/setup_run.py +183 -0
- package/skills/ct-grade/scripts/token_tracker.py +630 -0
- package/skills/ct-grade-v2-1/SKILL.md +237 -0
- package/skills/ct-grade-v2-1/agents/analysis-reporter.md +203 -0
- package/skills/ct-grade-v2-1/agents/blind-comparator.md +157 -0
- package/skills/ct-grade-v2-1/agents/scenario-runner.md +179 -0
- package/skills/ct-grade-v2-1/evals/evals.json +74 -0
- package/skills/ct-grade-v2-1/grade-viewer/__pycache__/build_op_stats.cpython-314.pyc +0 -0
- package/skills/ct-grade-v2-1/grade-viewer/__pycache__/generate_grade_review.cpython-314.pyc +0 -0
- package/skills/ct-grade-v2-1/grade-viewer/build_op_stats.py +174 -0
- package/skills/ct-grade-v2-1/grade-viewer/eval-analysis.json +41 -0
- package/skills/ct-grade-v2-1/grade-viewer/eval-report.md +34 -0
- package/skills/ct-grade-v2-1/grade-viewer/generate_grade_review.py +1023 -0
- package/skills/ct-grade-v2-1/grade-viewer/generate_grade_viewer.py +548 -0
- package/skills/ct-grade-v2-1/grade-viewer/grade-review-eval.html +613 -0
- package/skills/ct-grade-v2-1/grade-viewer/grade-review.html +1532 -0
- package/skills/ct-grade-v2-1/grade-viewer/viewer.html +620 -0
- package/skills/ct-grade-v2-1/manifest-entry.json +31 -0
- package/skills/ct-grade-v2-1/references/ab-testing.md +233 -0
- package/skills/ct-grade-v2-1/references/domains-ssot.md +156 -0
- package/skills/ct-grade-v2-1/references/grade-spec-v2.md +167 -0
- package/skills/ct-grade-v2-1/references/playbook-v2.md +393 -0
- package/skills/ct-grade-v2-1/references/token-tracking.md +202 -0
- package/skills/ct-grade-v2-1/scripts/generate_report.py +419 -0
- package/skills/ct-grade-v2-1/scripts/run_ab_test.py +493 -0
- package/skills/ct-grade-v2-1/scripts/run_scenario.py +396 -0
- package/skills/ct-grade-v2-1/scripts/setup_run.py +207 -0
- package/skills/ct-grade-v2-1/scripts/token_tracker.py +175 -0
- package/skills/ct-memory/SKILL.md +84 -0
- package/skills/ct-orchestrator/INSTALL.md +61 -0
- package/skills/ct-orchestrator/README.md +69 -0
- package/skills/ct-orchestrator/SKILL.md +380 -0
- package/skills/ct-orchestrator/manifest-entry.json +19 -0
- package/skills/ct-orchestrator/orchestrator-prompt.txt +17 -0
- package/skills/ct-orchestrator/references/SUBAGENT-PROTOCOL-BLOCK.md +66 -0
- package/skills/ct-orchestrator/references/autonomous-operation.md +167 -0
- package/skills/ct-orchestrator/references/lifecycle-gates.md +98 -0
- package/skills/ct-orchestrator/references/orchestrator-compliance.md +271 -0
- package/skills/ct-orchestrator/references/orchestrator-handoffs.md +85 -0
- package/skills/ct-orchestrator/references/orchestrator-patterns.md +164 -0
- package/skills/ct-orchestrator/references/orchestrator-recovery.md +113 -0
- package/skills/ct-orchestrator/references/orchestrator-spawning.md +271 -0
- package/skills/ct-orchestrator/references/orchestrator-tokens.md +180 -0
- package/skills/ct-research-agent/SKILL.md +226 -0
- package/skills/ct-skill-creator/.cleo/.context-state.json +13 -0
- package/skills/ct-skill-creator/.cleo/logs/cleo.2026-03-07.1.log +24 -0
- package/skills/ct-skill-creator/.cleo/tasks.db +0 -0
- package/skills/ct-skill-creator/SKILL.md +356 -0
- package/skills/ct-skill-creator/agents/analyzer.md +276 -0
- package/skills/ct-skill-creator/agents/comparator.md +204 -0
- package/skills/ct-skill-creator/agents/grader.md +225 -0
- package/skills/ct-skill-creator/assets/eval_review.html +146 -0
- package/skills/ct-skill-creator/eval-viewer/__pycache__/generate_review.cpython-314.pyc +0 -0
- package/skills/ct-skill-creator/eval-viewer/generate_review.py +471 -0
- package/skills/ct-skill-creator/eval-viewer/viewer.html +1325 -0
- package/skills/ct-skill-creator/manifest-entry.json +17 -0
- package/skills/ct-skill-creator/references/dynamic-context.md +228 -0
- package/skills/ct-skill-creator/references/frontmatter.md +83 -0
- package/skills/ct-skill-creator/references/invocation-control.md +165 -0
- package/skills/ct-skill-creator/references/output-patterns.md +86 -0
- package/skills/ct-skill-creator/references/provider-deployment.md +175 -0
- package/skills/ct-skill-creator/references/schemas.md +430 -0
- package/skills/ct-skill-creator/references/workflows.md +28 -0
- package/skills/ct-skill-creator/scripts/__init__.py +1 -0
- package/skills/ct-skill-creator/scripts/__pycache__/__init__.cpython-314.pyc +0 -0
- package/skills/ct-skill-creator/scripts/__pycache__/aggregate_benchmark.cpython-314.pyc +0 -0
- package/skills/ct-skill-creator/scripts/__pycache__/generate_report.cpython-314.pyc +0 -0
- package/skills/ct-skill-creator/scripts/__pycache__/improve_description.cpython-314.pyc +0 -0
- package/skills/ct-skill-creator/scripts/__pycache__/init_skill.cpython-314.pyc +0 -0
- package/skills/ct-skill-creator/scripts/__pycache__/quick_validate.cpython-314.pyc +0 -0
- package/skills/ct-skill-creator/scripts/__pycache__/run_eval.cpython-314.pyc +0 -0
- package/skills/ct-skill-creator/scripts/__pycache__/run_loop.cpython-314.pyc +0 -0
- package/skills/ct-skill-creator/scripts/__pycache__/utils.cpython-314.pyc +0 -0
- package/skills/ct-skill-creator/scripts/aggregate_benchmark.py +401 -0
- package/skills/ct-skill-creator/scripts/generate_report.py +326 -0
- package/skills/ct-skill-creator/scripts/improve_description.py +247 -0
- package/skills/ct-skill-creator/scripts/init_skill.py +306 -0
- package/skills/ct-skill-creator/scripts/package_skill.py +110 -0
- package/skills/ct-skill-creator/scripts/quick_validate.py +97 -0
- package/skills/ct-skill-creator/scripts/run_eval.py +310 -0
- package/skills/ct-skill-creator/scripts/run_loop.py +328 -0
- package/skills/ct-skill-creator/scripts/utils.py +47 -0
- package/skills/ct-skill-validator/SKILL.md +178 -0
- package/skills/ct-skill-validator/agents/ecosystem-checker.md +151 -0
- package/skills/ct-skill-validator/assets/valid-skill-example.md +13 -0
- package/skills/ct-skill-validator/evals/eval_set.json +14 -0
- package/skills/ct-skill-validator/evals/evals.json +52 -0
- package/skills/ct-skill-validator/manifest-entry.json +20 -0
- package/skills/ct-skill-validator/references/cleo-ecosystem-rules.md +163 -0
- package/skills/ct-skill-validator/references/validation-rules.md +168 -0
- package/skills/ct-skill-validator/scripts/__init__.py +0 -0
- package/skills/ct-skill-validator/scripts/__pycache__/audit_body.cpython-314.pyc +0 -0
- package/skills/ct-skill-validator/scripts/__pycache__/check_ecosystem.cpython-314.pyc +0 -0
- package/skills/ct-skill-validator/scripts/__pycache__/generate_validation_report.cpython-314.pyc +0 -0
- package/skills/ct-skill-validator/scripts/__pycache__/validate.cpython-314.pyc +0 -0
- package/skills/ct-skill-validator/scripts/audit_body.py +242 -0
- package/skills/ct-skill-validator/scripts/check_ecosystem.py +169 -0
- package/skills/ct-skill-validator/scripts/check_manifest.py +172 -0
- package/skills/ct-skill-validator/scripts/generate_validation_report.py +442 -0
- package/skills/ct-skill-validator/scripts/validate.py +422 -0
- package/skills/ct-spec-writer/SKILL.md +189 -0
- package/skills/ct-stickynote/README.md +14 -0
- package/skills/ct-stickynote/SKILL.md +46 -0
- package/skills/ct-task-executor/SKILL.md +296 -0
- package/skills/ct-validator/SKILL.md +216 -0
- package/skills/manifest.json +469 -0
- package/skills.json +281 -0
|
@@ -0,0 +1,172 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
CLEO manifest alignment checker.
|
|
4
|
+
Usage: check_manifest.py <skill-directory> <manifest-json> [--dispatch-config dispatch-config.json]
|
|
5
|
+
"""
|
|
6
|
+
import sys
|
|
7
|
+
import json
|
|
8
|
+
import re
|
|
9
|
+
import yaml
|
|
10
|
+
import argparse
|
|
11
|
+
from pathlib import Path
|
|
12
|
+
|
|
13
|
+
# Fields every manifest.json skills[] entry is expected to carry.
# check_manifest() reports an absent field as a WARN, not an ERROR.
MANIFEST_REQUIRED_FIELDS = [
    "name", "version", "description", "path", "status",
    "tier", "token_budget", "capabilities", "constraints",
]
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def check_manifest(skill_path, manifest_path, dispatch_config_path=None):
    """Check that a skill's SKILL.md frontmatter is aligned with manifest.json.

    Verifies, in order:
      1. SKILL.md exists and has parseable YAML frontmatter.
      2. The manifest file exists, is valid JSON, and contains a skills[]
         entry whose ``name`` matches the frontmatter name.
      3. That entry carries every field in MANIFEST_REQUIRED_FIELDS
         (a missing field is a warning, not an error).
      4. Optionally, the skill appears in dispatch-config.json's
         ``skill_overrides`` map.

    Progress is printed as it goes; a summary is always printed before
    returning, even on early exits.

    Args:
        skill_path: Path to the skill directory (its SKILL.md is read).
        manifest_path: Path to manifest.json.
        dispatch_config_path: Optional path to dispatch-config.json.

    Returns:
        The number of errors found (0 means the check passed).
    """
    skill_dir = Path(skill_path).resolve()
    skill_name = skill_dir.name
    manifest_file = Path(manifest_path).resolve()
    errors = 0
    warnings = 0

    def error(msg):
        nonlocal errors
        errors += 1
        print(f" \u274c ERROR: {msg}")

    def warn(msg):
        nonlocal warnings
        warnings += 1
        print(f" \u26a0\ufe0f WARN: {msg}")

    def ok(msg):
        print(f" \u2705 {msg}")

    print(f"\n=== CLEO Manifest Check: {skill_name} ===\n")

    # ── Read SKILL.md frontmatter ───────────────────────────────────────
    print("--- SKILL.md ---")
    skill_md = skill_dir / "SKILL.md"
    if not skill_md.exists():
        error("SKILL.md does not exist")
        _print_summary(errors, warnings)
        return errors

    raw_content = skill_md.read_text(encoding="utf-8")
    fm_match = re.match(r"^---\n(.*?)\n---", raw_content, re.DOTALL)
    if not fm_match:
        error("Could not extract frontmatter from SKILL.md")
        _print_summary(errors, warnings)
        return errors

    try:
        frontmatter = yaml.safe_load(fm_match.group(1))
    except yaml.YAMLError as e:
        error(f"Frontmatter YAML parse error: {e}")
        _print_summary(errors, warnings)
        return errors

    if not isinstance(frontmatter, dict):
        error("Frontmatter is not a dict")
        _print_summary(errors, warnings)
        return errors

    # Fall back to the directory name when frontmatter omits `name`.
    fm_name = frontmatter.get("name", skill_name)
    ok(f"SKILL.md frontmatter read (name: '{fm_name}')")

    # ── Read manifest.json ──────────────────────────────────────────────
    print("\n--- Manifest ---")
    if not manifest_file.exists():
        error(f"Manifest file not found: {manifest_path}")
        _print_summary(errors, warnings)
        return errors

    try:
        manifest_data = json.loads(manifest_file.read_text(encoding="utf-8"))
    except json.JSONDecodeError as e:
        error(f"Manifest is not valid JSON: {e}")
        _print_summary(errors, warnings)
        return errors

    ok("Manifest parsed successfully")

    skills_list = manifest_data.get("skills", [])
    matching = [s for s in skills_list if s.get("name") == fm_name]

    if not matching:
        error(f"Skill '{fm_name}' not found in manifest.json skills[] array")
        _print_summary(errors, warnings)
        return errors

    ok(f"Skill '{fm_name}' found in manifest.json")
    entry = matching[0]

    # Check required fields (missing fields only warn — they may be
    # filled in by tooling later).
    print("\n--- Required Fields ---")
    for field in MANIFEST_REQUIRED_FIELDS:
        if field not in entry:
            warn(f"Missing required field: '{field}'")
        else:
            ok(f"'{field}' present")

    # ── Dispatch config check ───────────────────────────────────────────
    if dispatch_config_path:
        print("\n--- Dispatch Config ---")
        dc_file = Path(dispatch_config_path).resolve()
        # Initialize up front so a missing or invalid file can never
        # leave dc_data unbound when we test it below.
        dc_data = None
        if not dc_file.exists():
            error(f"Dispatch config not found: {dispatch_config_path}")
        else:
            try:
                dc_data = json.loads(dc_file.read_text(encoding="utf-8"))
            except json.JSONDecodeError as e:
                error(f"Dispatch config is not valid JSON: {e}")

        if dc_data is not None:
            overrides = dc_data.get("skill_overrides", {})
            if fm_name not in overrides:
                warn(f"Skill '{fm_name}' not found in dispatch-config.json skill_overrides")
            else:
                ok(f"Skill '{fm_name}' found in dispatch-config.json")

    _print_summary(errors, warnings)
    return errors
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def _print_summary(errors, warnings):
|
|
134
|
+
"""Print the check summary."""
|
|
135
|
+
print(f"\n=== SUMMARY ===")
|
|
136
|
+
print(f"Errors: {errors}")
|
|
137
|
+
print(f"Warnings: {warnings}")
|
|
138
|
+
|
|
139
|
+
if errors > 0:
|
|
140
|
+
print(f"Result: FAIL")
|
|
141
|
+
elif warnings > 0:
|
|
142
|
+
print(f"Result: PASS (with warnings)")
|
|
143
|
+
else:
|
|
144
|
+
print(f"Result: PASS")
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
def main():
    """CLI entry point: validate arguments, run the check, set exit status."""
    arg_parser = argparse.ArgumentParser(
        description="CLEO manifest alignment checker"
    )
    arg_parser.add_argument("skill_dir", help="Path to the skill directory")
    arg_parser.add_argument("manifest", help="Path to manifest.json")
    arg_parser.add_argument("--dispatch-config", help="Path to dispatch-config.json")
    opts = arg_parser.parse_args()

    skill_root = Path(opts.skill_dir).resolve()
    if not skill_root.is_dir():
        print(f"Error: '{opts.skill_dir}' is not a directory", file=sys.stderr)
        sys.exit(1)

    # Exit nonzero when the checker reports any errors.
    error_total = check_manifest(
        skill_root,
        opts.manifest,
        dispatch_config_path=opts.dispatch_config,
    )
    sys.exit(1 if error_total > 0 else 0)


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,442 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Generate a full 3-phase HTML validation report for a CLEO skill.
|
|
3
|
+
|
|
4
|
+
Phase 1: Structural compliance (validate.py tiers 1-5)
|
|
5
|
+
Phase 2: CLEO Ecosystem compliance (ecosystem-check.json, if provided)
|
|
6
|
+
Phase 3: Quality eval results (grading.json, comparison.json, if provided)
|
|
7
|
+
|
|
8
|
+
Opens the report in the user's browser automatically.
|
|
9
|
+
|
|
10
|
+
Usage:
|
|
11
|
+
python generate_validation_report.py <skill-dir>
|
|
12
|
+
python generate_validation_report.py <skill-dir> --manifest path/to/manifest.json
|
|
13
|
+
python generate_validation_report.py <skill-dir> --ecosystem-check ecosystem-check.json
|
|
14
|
+
python generate_validation_report.py <skill-dir> --grading grading.json --comparison comparison.json
|
|
15
|
+
python generate_validation_report.py <skill-dir> --output report.html
|
|
16
|
+
python generate_validation_report.py <skill-dir> --no-open
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
import argparse
|
|
20
|
+
import html
|
|
21
|
+
import json
|
|
22
|
+
import subprocess
|
|
23
|
+
import sys
|
|
24
|
+
import tempfile
|
|
25
|
+
import time
|
|
26
|
+
import webbrowser
|
|
27
|
+
from pathlib import Path
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
# Human-readable labels for the five structural-validation tiers
# reported by validate.py, keyed by tier number. _tier_section() falls
# back to a generic "Tier N" label for unknown tiers.
TIER_NAMES = {
    1: "Tier 1 — Structure",
    2: "Tier 2 — Frontmatter Quality",
    3: "Tier 3 — Body Quality",
    4: "Tier 4 — CLEO Integration",
    5: "Tier 5 — Provider Compatibility",
}
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def run_validate(skill_path: Path, manifest: str | None, dispatch_config: str | None, provider_map: str | None) -> dict:
|
|
40
|
+
"""Run validate.py --json and return parsed output."""
|
|
41
|
+
script = Path(__file__).parent / "validate.py"
|
|
42
|
+
cmd = [sys.executable, str(script), str(skill_path), "--json"]
|
|
43
|
+
if manifest:
|
|
44
|
+
cmd.extend(["--manifest", manifest])
|
|
45
|
+
if dispatch_config:
|
|
46
|
+
cmd.extend(["--dispatch-config", dispatch_config])
|
|
47
|
+
if provider_map:
|
|
48
|
+
cmd.extend(["--provider-map", provider_map])
|
|
49
|
+
result = subprocess.run(cmd, capture_output=True, text=True)
|
|
50
|
+
try:
|
|
51
|
+
return json.loads(result.stdout)
|
|
52
|
+
except json.JSONDecodeError:
|
|
53
|
+
return {
|
|
54
|
+
"skill_name": skill_path.name,
|
|
55
|
+
"results": [{"tier": 1, "severity": "ERROR", "message": f"validate.py failed: {result.stderr.strip()}"}],
|
|
56
|
+
"errors": 1, "warnings": 0, "passed": False,
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def run_audit_body(skill_path: Path) -> list[dict] | None:
|
|
61
|
+
"""Run audit_body.py --json and return parsed findings."""
|
|
62
|
+
script = Path(__file__).parent / "audit_body.py"
|
|
63
|
+
if not script.exists():
|
|
64
|
+
return None
|
|
65
|
+
result = subprocess.run([sys.executable, str(script), str(skill_path), "--json"], capture_output=True, text=True)
|
|
66
|
+
if result.stdout.strip():
|
|
67
|
+
try:
|
|
68
|
+
return json.loads(result.stdout)
|
|
69
|
+
except json.JSONDecodeError:
|
|
70
|
+
pass
|
|
71
|
+
return None
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _phase_header(phase_num: int, title: str, status: str) -> str:
|
|
75
|
+
color = {"PASS": "#788c5d", "PASS_WITH_WARNINGS": "#d97706", "FAIL": "#c44", "PENDING": "#b0aea5"}.get(status, "#b0aea5")
|
|
76
|
+
label = {"PASS": "PASS", "PASS_WITH_WARNINGS": "PASS ⚠", "FAIL": "FAIL", "PENDING": "—"}.get(status, status)
|
|
77
|
+
return f"""
|
|
78
|
+
<div class="phase-header">
|
|
79
|
+
<span class="phase-num">Phase {phase_num}</span>
|
|
80
|
+
<span class="phase-title">{html.escape(title)}</span>
|
|
81
|
+
<span class="phase-badge" style="color:{color}">{label}</span>
|
|
82
|
+
</div>
|
|
83
|
+
"""
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def _tier_section(tier_num: int, tier_results: list[dict]) -> str:
    """Render one validation tier as a findings list with a summary header.

    The header indicator prefers the error count, then the warning count,
    then "OK". Findings with severities other than OK/WARN/ERROR are
    silently omitted.
    """
    tier_name = html.escape(TIER_NAMES.get(tier_num, f"Tier {tier_num}"))
    severities = [r["severity"] for r in tier_results]
    t_errors = severities.count("ERROR")
    t_warns = severities.count("WARN")
    if t_errors:
        indicator = f" — {t_errors} error(s)"
    elif t_warns:
        indicator = f" — {t_warns} warning(s)"
    else:
        indicator = " — OK"

    icon_by_sev = {"OK": "✅", "WARN": "⚠️", "ERROR": "❌"}
    css_by_sev = {"OK": "finding-ok", "WARN": "finding-warn", "ERROR": "finding-error"}
    rows = ""
    for r in tier_results:
        sev = r["severity"]
        if sev not in icon_by_sev:
            continue  # unknown severities are skipped, as before
        msg = html.escape(r["message"])
        rows += f'    <div class="finding {css_by_sev[sev]}"><span class="icon">{icon_by_sev[sev]}</span><span>{msg}</span></div>\n'

    return f"""
  <div class="tier-section">
    <div class="tier-header">{tier_name}{indicator}</div>
{rows}  </div>
"""
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def _ecosystem_section(eco: dict) -> str:
    """Render the Phase-2 CLEO ecosystem compliance block.

    *eco* is the parsed ecosystem-check.json: an overall verdict,
    per-rule findings (status OK/WARN/ERROR/SKIP plus finding text and
    optional evidence), summary counts, and optional recommendations.
    Assumes that shape — TODO confirm against check_ecosystem.py output.
    """
    verdict = eco.get("verdict", "UNKNOWN")
    # Verdict badge: green / amber / red; gray for anything unrecognized.
    verdict_color = {"PASS": "#788c5d", "PASS_WITH_WARNINGS": "#d97706", "FAIL": "#c44"}.get(verdict, "#b0aea5")
    summary = eco.get("summary", {})
    primary_domain = eco.get("primary_domain", "—")
    lifecycle = eco.get("lifecycle_stages_served", [])
    recommendations = eco.get("recommendations", [])
    rules = eco.get("rules", [])

    rule_rows = ""
    for rule in rules:
        status = rule.get("status", "")
        icon = {"OK": "✅", "WARN": "⚠️", "ERROR": "❌", "SKIP": "⏭"}.get(status, "•")
        css = {"OK": "finding-ok", "WARN": "finding-warn", "ERROR": "finding-error", "SKIP": "finding-skip"}.get(status, "")
        rule_name = html.escape(f"Rule {rule.get('rule_id', '?')} — {rule.get('rule_name', '')}")
        finding = html.escape(rule.get("finding", ""))
        evidence = html.escape(rule.get("evidence", ""))
        # Evidence is optional; only emit its sub-block when present.
        ev_block = f'<div class="evidence">Evidence: {evidence}</div>' if evidence else ""
        rule_rows += f"""    <div class="finding {css}">
      <span class="icon">{icon}</span>
      <span><strong>{rule_name}</strong><br>{finding}{ev_block}</span>
    </div>\n"""

    rec_items = "".join(f"<li>{html.escape(r)}</li>" for r in recommendations) if recommendations else ""
    rec_block = f'<div class="recommendations"><strong>Recommendations:</strong><ol>{rec_items}</ol></div>' if rec_items else ""

    return f"""
  <div class="tier-section" style="border-color:#3a5a8c">
    <div class="tier-header" style="background:#2c3e5a">
      CLEO Ecosystem Compliance — Primary domain: {html.escape(primary_domain)} | Lifecycle: {html.escape(", ".join(lifecycle) or "—")}
      <span style="float:right;color:{verdict_color}">{html.escape(verdict)}</span>
    </div>
    <div style="padding:10px 16px;font-size:0.8rem;color:#6b6b6b;background:#f7f9fc;border-bottom:1px solid #dde6f0">
      Errors: {summary.get("errors", 0)} | Warnings: {summary.get("warnings", 0)} | Skipped: {summary.get("skipped", 0)} | Passed: {summary.get("passed", 0)}
    </div>
{rule_rows}    {rec_block}
  </div>
"""
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
def _grading_section(grading: dict) -> str:
|
|
156
|
+
summary = grading.get("summary", {})
|
|
157
|
+
expectations = grading.get("expectations", [])
|
|
158
|
+
pass_rate = summary.get("pass_rate", 0)
|
|
159
|
+
color = "#788c5d" if pass_rate >= 0.8 else ("#d97706" if pass_rate >= 0.5 else "#c44")
|
|
160
|
+
|
|
161
|
+
rows = ""
|
|
162
|
+
for exp in expectations:
|
|
163
|
+
passed = exp.get("passed", False)
|
|
164
|
+
icon = "✅" if passed else "❌"
|
|
165
|
+
css = "finding-ok" if passed else "finding-error"
|
|
166
|
+
text = html.escape(exp.get("text", ""))
|
|
167
|
+
evidence = html.escape(exp.get("evidence", ""))
|
|
168
|
+
rows += f' <div class="finding {css}"><span class="icon">{icon}</span><span><strong>{text}</strong><br><span class="evidence">{evidence}</span></span></div>\n'
|
|
169
|
+
|
|
170
|
+
return f"""
|
|
171
|
+
<div class="tier-section" style="border-color:#5a3a8c">
|
|
172
|
+
<div class="tier-header" style="background:#3a2a5a">
|
|
173
|
+
Quality Eval — Grading Results
|
|
174
|
+
<span style="float:right;color:{color}">{summary.get("passed", 0)}/{summary.get("total", 0)} passed ({pass_rate:.0%})</span>
|
|
175
|
+
</div>
|
|
176
|
+
{rows} </div>
|
|
177
|
+
"""
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
def _comparison_section(comparison: dict) -> str:
    """Render the Phase-3 A/B comparison block.

    *comparison* is the parsed comparison.json: a winner ("A"/"B"/other),
    reasoning text, and per-output quality dicts with a score plus
    strengths/weaknesses lists. Output A appears to be "ours": A winning
    renders green, B winning red, anything else amber — TODO confirm
    against the grading skill's comparison schema.
    """
    winner = comparison.get("winner", "?")
    reasoning = html.escape(comparison.get("reasoning", ""))
    q_a = comparison.get("output_quality", {}).get("A", {})
    q_b = comparison.get("output_quality", {}).get("B", {})

    def quality_block(label: str, q: dict) -> str:
        # One card per output: score out of 10 plus bulleted
        # strengths/weaknesses.
        score = q.get("score", "?")
        strengths = "".join(f"<li>{html.escape(s)}</li>" for s in q.get("strengths", []))
        weaknesses = "".join(f"<li>{html.escape(w)}</li>" for w in q.get("weaknesses", []))
        return f"""<div class="ab-block"><strong>Output {label} (score: {score}/10)</strong>
<div style="margin-top:6px"><em>Strengths:</em><ul>{strengths}</ul></div>
<div><em>Weaknesses:</em><ul>{weaknesses}</ul></div></div>"""

    winner_color = "#788c5d" if winner == "A" else ("#c44" if winner == "B" else "#d97706")
    return f"""
  <div class="tier-section" style="border-color:#5a3a8c">
    <div class="tier-header" style="background:#3a2a5a">
      Quality Eval — A/B Comparison
      <span style="float:right;color:{winner_color}">Winner: {html.escape(winner)}</span>
    </div>
    <div style="padding:12px 16px;font-size:0.875rem">
      <p><strong>Reasoning:</strong> {reasoning}</p>
      <div class="ab-grid">
        {quality_block("A", q_a)}
        {quality_block("B", q_b)}
      </div>
    </div>
  </div>
"""
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
def generate_html(
    validation: dict,
    audit: list[dict] | None,
    ecosystem: dict | None,
    grading: dict | None,
    comparison: dict | None,
    skill_path: Path,
) -> str:
    """Generate a self-contained 3-phase HTML validation report.

    Args:
        validation: Phase 1 structural results (keys used: skill_name, errors,
            warnings, passed, results).
        audit: Phase 1 body-audit findings; None = audit not run, [] = clean.
        ecosystem: Phase 2 results dict (key used: verdict), or None if not run.
        grading: Phase 3 grading dict (key used: summary.pass_rate), or None.
        comparison: Phase 3 A/B comparison dict, or None.
        skill_path: Skill directory; its name is the fallback report title.

    Returns:
        A complete HTML document as a string (inline CSS, no external assets
        beyond Google Fonts links).
    """
    skill_name = html.escape(validation.get("skill_name", skill_path.name))
    s_errors = validation.get("errors", 0)
    s_warnings = validation.get("warnings", 0)
    # If "passed" is absent, infer it from the error count.
    s_passed = validation.get("passed", s_errors == 0)

    eco_verdict = ecosystem.get("verdict", "PENDING") if ecosystem else "PENDING"
    # Tri-state: True/False when Phase 2 ran, None when it is still pending.
    eco_passed = eco_verdict in ("PASS", "PASS_WITH_WARNINGS") if ecosystem else None

    # Compute per-phase status labels and the overall verdict.
    phase1_status = "PASS" if s_passed and s_warnings == 0 else ("PASS_WITH_WARNINGS" if s_passed else "FAIL")
    phase2_status = eco_verdict if ecosystem else "PENDING"
    # Phase 3 passes at >= 80% grading pass rate; missing grading = PENDING.
    phase3_status = "PASS" if grading and grading.get("summary", {}).get("pass_rate", 0) >= 0.8 else ("FAIL" if grading else "PENDING")

    # Pending phases (eco None, grading None) do not count against the overall verdict.
    all_passed = s_passed and (eco_passed is not False) and (grading is None or phase3_status == "PASS")
    overall_label = "ALL PHASES PASS" if all_passed else "ISSUES FOUND"
    overall_color = "#788c5d" if all_passed else "#c44"

    # Group Phase 1 findings by tier and render one section per tier, in order.
    results = validation.get("results", [])
    tiers: dict[int, list[dict]] = {}
    for r in results:
        t = r.get("tier", 0)
        tiers.setdefault(t, []).append(r)

    tier_html = "".join(_tier_section(t, tiers[t]) for t in sorted(tiers))

    # audit is None when --audit was not requested; [] means it ran clean.
    audit_html = ""
    if audit is not None:
        if not audit:
            audit_html = '<div class="tier-section"><div class="tier-header">Body Quality Audit (audit_body)</div><div class="no-issues">No issues found.</div></div>'
        else:
            items = "".join(f'<div class="finding finding-warn"><span class="icon">⚠️</span><span>{html.escape(str(i))}</span></div>' for i in audit)
            audit_html = f'<div class="tier-section"><div class="tier-header">Body Quality Audit (audit_body)</div>{items}</div>'

    # Phases 2/3 render dimmed "not yet run" placeholders with how-to hints when absent.
    eco_html = _ecosystem_section(ecosystem) if ecosystem else '<div class="tier-section pending"><div class="tier-header">CLEO Ecosystem Compliance — Not yet run</div><div class="no-issues">Run: python check_ecosystem.py <skill-dir> | ecosystem-checker agent | save to ecosystem-check.json</div></div>'

    grading_html = _grading_section(grading) if grading else '<div class="tier-section pending"><div class="tier-header" style="background:#3a2a5a">Quality Eval — Grading not yet run</div><div class="no-issues">Run A/B eval using ct-skill-creator agents/grader.md then pass --grading grading.json</div></div>'

    comparison_html = _comparison_section(comparison) if comparison else ""

    # NOTE: CSS braces are doubled ({{ }}) because this is an f-string template.
    return f"""<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>{skill_name} — CLEO Full Validation Report</title>
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link href="https://fonts.googleapis.com/css2?family=Poppins:wght@500;600&family=Lora:wght@400;500&display=swap" rel="stylesheet">
<style>
body {{ font-family: 'Lora', Georgia, serif; max-width: 900px; margin: 0 auto; padding: 32px 24px; background: #faf9f5; color: #141413; }}
h1 {{ font-family: 'Poppins', sans-serif; color: #141413; margin-bottom: 4px; }}
h2 {{ font-family: 'Poppins', sans-serif; font-size: 0.95rem; color: #141413; margin: 24px 0 8px; text-transform: uppercase; letter-spacing: 0.06em; }}
.subtitle {{ color: #b0aea5; font-size: 0.875rem; margin-bottom: 24px; }}
.overall-box {{ background: white; border: 2px solid {overall_color}; border-radius: 8px; padding: 18px 24px; margin-bottom: 24px; display: flex; align-items: center; gap: 20px; }}
.overall-badge {{ font-family: 'Poppins', sans-serif; font-size: 1.3rem; font-weight: 600; color: {overall_color}; flex-shrink: 0; }}
.phase-grid {{ display: grid; grid-template-columns: 1fr 1fr 1fr; gap: 12px; margin-bottom: 28px; }}
.phase-card {{ background: white; border: 1px solid #e8e6dc; border-radius: 8px; padding: 14px 16px; }}
.phase-card .label {{ font-family: 'Poppins', sans-serif; font-size: 0.7rem; text-transform: uppercase; letter-spacing: 0.08em; color: #b0aea5; }}
.phase-card .name {{ font-size: 0.875rem; margin: 4px 0; font-weight: 500; }}
.phase-card .status {{ font-family: 'Poppins', sans-serif; font-size: 0.9rem; font-weight: 600; }}
.phase-header {{ display: flex; align-items: center; gap: 10px; margin: 24px 0 8px; font-family: 'Poppins', sans-serif; }}
.phase-num {{ background: #141413; color: white; border-radius: 50%; width: 22px; height: 22px; display: flex; align-items: center; justify-content: center; font-size: 0.7rem; font-weight: 600; flex-shrink: 0; }}
.phase-title {{ font-size: 0.9rem; font-weight: 600; flex: 1; }}
.phase-badge {{ font-size: 0.85rem; font-weight: 600; }}
.tier-section {{ background: white; border: 1px solid #e8e6dc; border-radius: 8px; margin-bottom: 12px; overflow: hidden; }}
.tier-section.pending {{ opacity: 0.6; }}
.tier-header {{ font-family: 'Poppins', sans-serif; font-size: 0.8rem; font-weight: 600; padding: 10px 16px; background: #141413; color: #faf9f5; }}
.finding {{ display: flex; align-items: flex-start; gap: 10px; padding: 9px 16px; border-bottom: 1px solid #f0ede3; font-size: 0.85rem; line-height: 1.5; }}
.finding:last-child {{ border-bottom: none; }}
.icon {{ flex-shrink: 0; }}
.finding-ok {{ color: #3b3b3b; }}
.finding-warn {{ color: #92400e; background: #fffbeb; }}
.finding-error {{ color: #7f1d1d; background: #fef2f2; }}
.finding-skip {{ color: #6b6b6b; background: #f5f5f5; }}
.evidence {{ font-size: 0.78rem; color: #888; font-style: italic; margin-top: 3px; display: block; }}
.no-issues {{ padding: 12px 16px; color: #788c5d; font-size: 0.875rem; }}
.recommendations {{ padding: 10px 16px; font-size: 0.85rem; background: #fffbeb; }}
.recommendations ol {{ margin: 4px 0 0 16px; padding: 0; }}
.ab-grid {{ display: grid; grid-template-columns: 1fr 1fr; gap: 16px; margin-top: 10px; }}
.ab-block {{ background: #f7f7f5; border-radius: 6px; padding: 10px 14px; font-size: 0.8rem; }}
.ab-block ul {{ margin: 4px 0 0 16px; padding: 0; }}
.footer {{ margin-top: 32px; font-size: 0.75rem; color: #b0aea5; text-align: center; }}
</style>
</head>
<body>
<h1>{skill_name}</h1>
<div class="subtitle">CLEO Full Validation Report — {time.strftime("%Y-%m-%d %H:%M")}</div>

<div class="overall-box">
<div class="overall-badge">{html.escape(overall_label)}</div>
<div style="font-size:0.875rem;color:#6b6b6b">
Structural: {s_errors} errors, {s_warnings} warnings |
Ecosystem: {html.escape(eco_verdict)} |
Quality: {html.escape(phase3_status)}
</div>
</div>

<div class="phase-grid">
<div class="phase-card">
<div class="label">Phase 1</div>
<div class="name">Structural Compliance</div>
<div class="status" style="color:{'#788c5d' if phase1_status == 'PASS' else ('#d97706' if 'WARN' in phase1_status else '#c44')}">{html.escape(phase1_status)}</div>
</div>
<div class="phase-card">
<div class="label">Phase 2</div>
<div class="name">CLEO Ecosystem Fit</div>
<div class="status" style="color:{'#788c5d' if phase2_status == 'PASS' else ('#d97706' if 'WARN' in phase2_status else ('#b0aea5' if phase2_status == 'PENDING' else '#c44'))}">{html.escape(phase2_status)}</div>
</div>
<div class="phase-card">
<div class="label">Phase 3</div>
<div class="name">Quality A/B Eval</div>
<div class="status" style="color:{'#788c5d' if phase3_status == 'PASS' else ('#b0aea5' if phase3_status == 'PENDING' else '#c44')}">{html.escape(phase3_status)}</div>
</div>
</div>

<h2>Phase 1 — Structural Compliance</h2>
{tier_html}
{audit_html}

<h2>Phase 2 — CLEO Ecosystem Fit</h2>
{eco_html}

<h2>Phase 3 — Quality A/B Eval</h2>
{grading_html}
{comparison_html}

<div class="footer">Generated by ct-skill-validator — CLEO Full Validation Report</div>
</body>
</html>
"""
|
|
351
|
+
|
|
352
|
+
|
|
353
|
+
def _load_json(path_str: str, label: str) -> dict | list | None:
    """Best-effort load of an optional JSON artifact; returns None on any problem.

    Always reports what happened on stderr — previously the grading/comparison
    loaders silently swallowed missing files and parse errors while the
    ecosystem loader warned, so a typo'd path looked like "phase not run".
    """
    path = Path(path_str)
    if not path.exists():
        print(f"Warning: {label} file not found at {path}", file=sys.stderr)
        return None
    try:
        data = json.loads(path.read_text())
    except json.JSONDecodeError as e:
        print(f"Warning: Could not parse {path}: {e}", file=sys.stderr)
        return None
    print(f"{label}: Loaded from {path}", file=sys.stderr)
    return data


def main() -> None:
    """CLI entry point: run Phase 1 locally, merge optional Phase 2/3 artifacts,
    write the HTML report, print a summary, and exit 0/1 on Phase 1 pass/fail."""
    parser = argparse.ArgumentParser(description="Generate full 3-phase validation report for a CLEO skill")
    parser.add_argument("skill_dir", help="Path to the skill directory")
    parser.add_argument("--manifest", default=None, help="Path to manifest.json (Tier 4 check)")
    parser.add_argument("--dispatch-config", default=None, help="Path to dispatch-config.json")
    parser.add_argument("--provider-map", default=None, help="Path to provider-skills-map.json")
    parser.add_argument("--ecosystem-check", default=None, help="Path to ecosystem-check.json (Phase 2 results)")
    parser.add_argument("--grading", default=None, help="Path to grading.json (Phase 3 quality eval)")
    parser.add_argument("--comparison", default=None, help="Path to comparison.json (Phase 3 A/B results)")
    parser.add_argument("--audit", action="store_true", help="Run audit_body.py for deep body analysis")
    parser.add_argument("--output", "-o", default=None, help="Write HTML to this path (default: temp file)")
    parser.add_argument("--no-open", action="store_true", help="Do not open the report in a browser")
    args = parser.parse_args()

    skill_path = Path(args.skill_dir).resolve()
    if not skill_path.is_dir():
        print(f"Error: '{args.skill_dir}' is not a directory", file=sys.stderr)
        sys.exit(1)

    # Phase 1 always runs; the body audit is opt-in.
    print("Phase 1: Running structural validation ...", file=sys.stderr)
    validation = run_validate(skill_path, args.manifest, args.dispatch_config, args.provider_map)

    audit = None
    if args.audit:
        print("Phase 1: Running body audit ...", file=sys.stderr)
        audit = run_audit_body(skill_path)

    # Phase 2/3 results come from pre-generated JSON artifacts, all loaded the
    # same way (warn-and-continue on missing/invalid files).
    ecosystem = _load_json(args.ecosystem_check, "Phase 2 ecosystem check") if args.ecosystem_check else None
    grading = _load_json(args.grading, "Phase 3 grading") if args.grading else None
    comparison = _load_json(args.comparison, "Phase 3 comparison") if args.comparison else None

    report_html = generate_html(validation, audit, ecosystem, grading, comparison, skill_path)

    if args.output:
        out_path = Path(args.output)
        out_path.parent.mkdir(parents=True, exist_ok=True)
        out_path.write_text(report_html)
        report_path = out_path.resolve()
    else:
        # No --output: write a uniquely-named file in the system temp dir.
        timestamp = time.strftime("%Y%m%d_%H%M%S")
        report_path = Path(tempfile.gettempdir()) / f"cleo_validation_{skill_path.name}_{timestamp}.html"
        report_path.write_text(report_html)

    s_errors = validation.get("errors", 0)
    s_warnings = validation.get("warnings", 0)
    # Same fallback as generate_html: infer "passed" from the error count.
    s_passed = validation.get("passed", s_errors == 0)
    eco_verdict = ecosystem.get("verdict", "—") if ecosystem else "not run"
    if grading:
        # Mirror generate_html's Phase 3 rule (>= 80% pass rate) instead of
        # unconditionally printing PASS whenever a grading file was supplied.
        phase3 = "PASS" if grading.get("summary", {}).get("pass_rate", 0) >= 0.8 else "FAIL"
    else:
        phase3 = "not run"

    print(f"\n Validation Report: {report_path}", file=sys.stderr)
    print(f" Phase 1 (Structural): {'PASS' if s_passed else 'FAIL'} ({s_errors} errors, {s_warnings} warnings)", file=sys.stderr)
    print(f" Phase 2 (Ecosystem): {eco_verdict}", file=sys.stderr)
    print(f" Phase 3 (Quality): {phase3}", file=sys.stderr)

    if not args.no_open:
        webbrowser.open(str(report_path))

    # Exit status reflects Phase 1 only; Phases 2/3 are advisory here.
    sys.exit(0 if s_passed else 1)
|
|
439
|
+
|
|
440
|
+
|
|
441
|
+
# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()
|